]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-2.6.32.49-201112041811.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.32.49-201112041811.patch
CommitLineData
627cd425
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..47f0daf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103+mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110+mkpiggy
111 mkprep
112+mkregtable
113 mktables
114 mktree
115 modpost
116@@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120+piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124@@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128+regdb.c
129 relocs
130+rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152+vmlinux.bin.all
153+vmlinux.bin.bz2
154 vmlinux.lds
155+vmlinux.relocs
156+voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zoffset.h
169diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170index c840e7d..f4c451c 100644
171--- a/Documentation/kernel-parameters.txt
172+++ b/Documentation/kernel-parameters.txt
173@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178+ virtualization environments that don't cope well with the
179+ expand down segment used by UDEREF on X86-32 or the frequent
180+ page table updates on X86-64.
181+
182+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183+
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187diff --git a/Makefile b/Makefile
188index a19b0e8..f773d59 100644
189--- a/Makefile
190+++ b/Makefile
191@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196-HOSTCXXFLAGS = -O2
197+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207-PHONY += scripts_basic
208-scripts_basic:
209+PHONY += scripts_basic gcc-plugins
210+scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214@@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218- cscope TAGS tags help %docs check% \
219+ cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223@@ -526,6 +527,42 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227+ifndef DISABLE_PAX_PLUGINS
228+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231+endif
232+ifdef CONFIG_PAX_MEMORY_STACKLEAK
233+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235+endif
236+ifdef CONFIG_KALLOCSTAT_PLUGIN
237+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238+endif
239+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242+endif
243+ifdef CONFIG_CHECKER_PLUGIN
244+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246+endif
247+endif
248+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250+gcc-plugins:
251+ $(Q)$(MAKE) $(build)=tools/gcc
252+else
253+gcc-plugins:
254+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
255+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
256+else
257+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
258+endif
259+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
260+endif
261+endif
262+
263 include $(srctree)/arch/$(SRCARCH)/Makefile
264
265 ifneq ($(CONFIG_FRAME_WARN),0)
266@@ -647,7 +684,7 @@ export mod_strip_cmd
267
268
269 ifeq ($(KBUILD_EXTMOD),)
270-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
271+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
272
273 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
274 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
275@@ -868,6 +905,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
276
277 # The actual objects are generated when descending,
278 # make sure no implicit rule kicks in
279+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
280 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
281
282 # Handle descending into subdirectories listed in $(vmlinux-dirs)
283@@ -877,7 +915,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
284 # Error messages still appears in the original language
285
286 PHONY += $(vmlinux-dirs)
287-$(vmlinux-dirs): prepare scripts
288+$(vmlinux-dirs): gcc-plugins prepare scripts
289 $(Q)$(MAKE) $(build)=$@
290
291 # Build the kernel release string
292@@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
293 $(Q)$(MAKE) $(build)=. missing-syscalls
294
295 # All the preparing..
296+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
297 prepare: prepare0
298
299 # The asm symlink changes when $(ARCH) changes.
300@@ -1127,6 +1166,7 @@ all: modules
301 # using awk while concatenating to the final file.
302
303 PHONY += modules
304+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
305 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
306 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
307 @$(kecho) ' Building modules, stage 2.';
308@@ -1136,7 +1176,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
309
310 # Target to prepare building external modules
311 PHONY += modules_prepare
312-modules_prepare: prepare scripts
313+modules_prepare: gcc-plugins prepare scripts
314
315 # Target to install modules
316 PHONY += modules_install
317@@ -1201,7 +1241,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
318 include/linux/autoconf.h include/linux/version.h \
319 include/linux/utsrelease.h \
320 include/linux/bounds.h include/asm*/asm-offsets.h \
321- Module.symvers Module.markers tags TAGS cscope*
322+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
323
324 # clean - Delete most, but leave enough to build external modules
325 #
326@@ -1245,7 +1285,7 @@ distclean: mrproper
327 @find $(srctree) $(RCS_FIND_IGNORE) \
328 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
329 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
330- -o -name '.*.rej' -o -size 0 \
331+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
332 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
333 -type f -print | xargs rm -f
334
335@@ -1292,6 +1332,7 @@ help:
336 @echo ' modules_prepare - Set up for building external modules'
337 @echo ' tags/TAGS - Generate tags file for editors'
338 @echo ' cscope - Generate cscope index'
339+ @echo ' gtags - Generate GNU GLOBAL index'
340 @echo ' kernelrelease - Output the release version string'
341 @echo ' kernelversion - Output the version stored in Makefile'
342 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
343@@ -1393,6 +1434,7 @@ PHONY += $(module-dirs) modules
344 $(module-dirs): crmodverdir $(objtree)/Module.symvers
345 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
346
347+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
348 modules: $(module-dirs)
349 @$(kecho) ' Building modules, stage 2.';
350 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
351@@ -1448,7 +1490,7 @@ endif # KBUILD_EXTMOD
352 quiet_cmd_tags = GEN $@
353 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
354
355-tags TAGS cscope: FORCE
356+tags TAGS cscope gtags: FORCE
357 $(call cmd,tags)
358
359 # Scripts to check various things for consistency
360@@ -1513,17 +1555,19 @@ else
361 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
362 endif
363
364-%.s: %.c prepare scripts FORCE
365+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
366+%.s: %.c gcc-plugins prepare scripts FORCE
367 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
368 %.i: %.c prepare scripts FORCE
369 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
370-%.o: %.c prepare scripts FORCE
371+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
372+%.o: %.c gcc-plugins prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 %.lst: %.c prepare scripts FORCE
375 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
376-%.s: %.S prepare scripts FORCE
377+%.s: %.S gcc-plugins prepare scripts FORCE
378 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
379-%.o: %.S prepare scripts FORCE
380+%.o: %.S gcc-plugins prepare scripts FORCE
381 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
382 %.symtypes: %.c prepare scripts FORCE
383 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
384@@ -1533,11 +1577,13 @@ endif
385 $(cmd_crmodverdir)
386 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
387 $(build)=$(build-dir)
388-%/: prepare scripts FORCE
389+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
390+%/: gcc-plugins prepare scripts FORCE
391 $(cmd_crmodverdir)
392 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
393 $(build)=$(build-dir)
394-%.ko: prepare scripts FORCE
395+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
396+%.ko: gcc-plugins prepare scripts FORCE
397 $(cmd_crmodverdir)
398 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
399 $(build)=$(build-dir) $(@:.ko=.o)
400diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
401index 5c75c1b..c82f878 100644
402--- a/arch/alpha/include/asm/elf.h
403+++ b/arch/alpha/include/asm/elf.h
404@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
405
406 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
407
408+#ifdef CONFIG_PAX_ASLR
409+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
410+
411+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
412+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
413+#endif
414+
415 /* $0 is set by ld.so to a pointer to a function which might be
416 registered using atexit. This provides a mean for the dynamic
417 linker to call DT_FINI functions for shared libraries that have
418diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
419index 3f0c59f..cf1e100 100644
420--- a/arch/alpha/include/asm/pgtable.h
421+++ b/arch/alpha/include/asm/pgtable.h
422@@ -101,6 +101,17 @@ struct vm_area_struct;
423 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
424 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
425 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
426+
427+#ifdef CONFIG_PAX_PAGEEXEC
428+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
429+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
430+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
431+#else
432+# define PAGE_SHARED_NOEXEC PAGE_SHARED
433+# define PAGE_COPY_NOEXEC PAGE_COPY
434+# define PAGE_READONLY_NOEXEC PAGE_READONLY
435+#endif
436+
437 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
438
439 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
440diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
441index ebc3c89..20cfa63 100644
442--- a/arch/alpha/kernel/module.c
443+++ b/arch/alpha/kernel/module.c
444@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
445
446 /* The small sections were sorted to the end of the segment.
447 The following should definitely cover them. */
448- gp = (u64)me->module_core + me->core_size - 0x8000;
449+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
450 got = sechdrs[me->arch.gotsecindex].sh_addr;
451
452 for (i = 0; i < n; i++) {
453diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
454index a94e49c..d71dd44 100644
455--- a/arch/alpha/kernel/osf_sys.c
456+++ b/arch/alpha/kernel/osf_sys.c
457@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
458 /* At this point: (!vma || addr < vma->vm_end). */
459 if (limit - len < addr)
460 return -ENOMEM;
461- if (!vma || addr + len <= vma->vm_start)
462+ if (check_heap_stack_gap(vma, addr, len))
463 return addr;
464 addr = vma->vm_end;
465 vma = vma->vm_next;
466@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
467 merely specific addresses, but regions of memory -- perhaps
468 this feature should be incorporated into all ports? */
469
470+#ifdef CONFIG_PAX_RANDMMAP
471+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
472+#endif
473+
474 if (addr) {
475 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
476 if (addr != (unsigned long) -ENOMEM)
477@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
478 }
479
480 /* Next, try allocating at TASK_UNMAPPED_BASE. */
481- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
482- len, limit);
483+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
484+
485 if (addr != (unsigned long) -ENOMEM)
486 return addr;
487
488diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
489index 00a31de..2ded0f2 100644
490--- a/arch/alpha/mm/fault.c
491+++ b/arch/alpha/mm/fault.c
492@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
493 __reload_thread(pcb);
494 }
495
496+#ifdef CONFIG_PAX_PAGEEXEC
497+/*
498+ * PaX: decide what to do with offenders (regs->pc = fault address)
499+ *
500+ * returns 1 when task should be killed
501+ * 2 when patched PLT trampoline was detected
502+ * 3 when unpatched PLT trampoline was detected
503+ */
504+static int pax_handle_fetch_fault(struct pt_regs *regs)
505+{
506+
507+#ifdef CONFIG_PAX_EMUPLT
508+ int err;
509+
510+ do { /* PaX: patched PLT emulation #1 */
511+ unsigned int ldah, ldq, jmp;
512+
513+ err = get_user(ldah, (unsigned int *)regs->pc);
514+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
515+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
516+
517+ if (err)
518+ break;
519+
520+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
521+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
522+ jmp == 0x6BFB0000U)
523+ {
524+ unsigned long r27, addr;
525+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
526+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
527+
528+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
529+ err = get_user(r27, (unsigned long *)addr);
530+ if (err)
531+ break;
532+
533+ regs->r27 = r27;
534+ regs->pc = r27;
535+ return 2;
536+ }
537+ } while (0);
538+
539+ do { /* PaX: patched PLT emulation #2 */
540+ unsigned int ldah, lda, br;
541+
542+ err = get_user(ldah, (unsigned int *)regs->pc);
543+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
544+ err |= get_user(br, (unsigned int *)(regs->pc+8));
545+
546+ if (err)
547+ break;
548+
549+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
550+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
551+ (br & 0xFFE00000U) == 0xC3E00000U)
552+ {
553+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
554+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
555+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
556+
557+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
558+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
559+ return 2;
560+ }
561+ } while (0);
562+
563+ do { /* PaX: unpatched PLT emulation */
564+ unsigned int br;
565+
566+ err = get_user(br, (unsigned int *)regs->pc);
567+
568+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
569+ unsigned int br2, ldq, nop, jmp;
570+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
571+
572+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
573+ err = get_user(br2, (unsigned int *)addr);
574+ err |= get_user(ldq, (unsigned int *)(addr+4));
575+ err |= get_user(nop, (unsigned int *)(addr+8));
576+ err |= get_user(jmp, (unsigned int *)(addr+12));
577+ err |= get_user(resolver, (unsigned long *)(addr+16));
578+
579+ if (err)
580+ break;
581+
582+ if (br2 == 0xC3600000U &&
583+ ldq == 0xA77B000CU &&
584+ nop == 0x47FF041FU &&
585+ jmp == 0x6B7B0000U)
586+ {
587+ regs->r28 = regs->pc+4;
588+ regs->r27 = addr+16;
589+ regs->pc = resolver;
590+ return 3;
591+ }
592+ }
593+ } while (0);
594+#endif
595+
596+ return 1;
597+}
598+
599+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
600+{
601+ unsigned long i;
602+
603+ printk(KERN_ERR "PAX: bytes at PC: ");
604+ for (i = 0; i < 5; i++) {
605+ unsigned int c;
606+ if (get_user(c, (unsigned int *)pc+i))
607+ printk(KERN_CONT "???????? ");
608+ else
609+ printk(KERN_CONT "%08x ", c);
610+ }
611+ printk("\n");
612+}
613+#endif
614
615 /*
616 * This routine handles page faults. It determines the address,
617@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
618 good_area:
619 si_code = SEGV_ACCERR;
620 if (cause < 0) {
621- if (!(vma->vm_flags & VM_EXEC))
622+ if (!(vma->vm_flags & VM_EXEC)) {
623+
624+#ifdef CONFIG_PAX_PAGEEXEC
625+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
626+ goto bad_area;
627+
628+ up_read(&mm->mmap_sem);
629+ switch (pax_handle_fetch_fault(regs)) {
630+
631+#ifdef CONFIG_PAX_EMUPLT
632+ case 2:
633+ case 3:
634+ return;
635+#endif
636+
637+ }
638+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
639+ do_group_exit(SIGKILL);
640+#else
641 goto bad_area;
642+#endif
643+
644+ }
645 } else if (!cause) {
646 /* Allow reads even for write-only mappings */
647 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
648diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
649index 6aac3f5..265536b 100644
650--- a/arch/arm/include/asm/elf.h
651+++ b/arch/arm/include/asm/elf.h
652@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
653 the loader. We need to make sure that it is out of the way of the program
654 that it will "exec", and that there is sufficient room for the brk. */
655
656-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
657+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
658+
659+#ifdef CONFIG_PAX_ASLR
660+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
661+
662+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
663+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
664+#endif
665
666 /* When the program starts, a1 contains a pointer to a function to be
667 registered with atexit, as per the SVR4 ABI. A value of 0 means we
668diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
669index c019949..388fdd1 100644
670--- a/arch/arm/include/asm/kmap_types.h
671+++ b/arch/arm/include/asm/kmap_types.h
672@@ -19,6 +19,7 @@ enum km_type {
673 KM_SOFTIRQ0,
674 KM_SOFTIRQ1,
675 KM_L2_CACHE,
676+ KM_CLEARPAGE,
677 KM_TYPE_NR
678 };
679
680diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
681index 1d6bd40..fba0cb9 100644
682--- a/arch/arm/include/asm/uaccess.h
683+++ b/arch/arm/include/asm/uaccess.h
684@@ -22,6 +22,8 @@
685 #define VERIFY_READ 0
686 #define VERIFY_WRITE 1
687
688+extern void check_object_size(const void *ptr, unsigned long n, bool to);
689+
690 /*
691 * The exception table consists of pairs of addresses: the first is the
692 * address of an instruction that is allowed to fault, and the second is
693@@ -387,8 +389,23 @@ do { \
694
695
696 #ifdef CONFIG_MMU
697-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
698-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
699+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
700+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
701+
702+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
703+{
704+ if (!__builtin_constant_p(n))
705+ check_object_size(to, n, false);
706+ return ___copy_from_user(to, from, n);
707+}
708+
709+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
710+{
711+ if (!__builtin_constant_p(n))
712+ check_object_size(from, n, true);
713+ return ___copy_to_user(to, from, n);
714+}
715+
716 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
717 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
718 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
719@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
720
721 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
722 {
723+ if ((long)n < 0)
724+ return n;
725+
726 if (access_ok(VERIFY_READ, from, n))
727 n = __copy_from_user(to, from, n);
728 else /* security hole - plug it */
729@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
730
731 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
732 {
733+ if ((long)n < 0)
734+ return n;
735+
736 if (access_ok(VERIFY_WRITE, to, n))
737 n = __copy_to_user(to, from, n);
738 return n;
739diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
740index 0e62770..e2c2cd6 100644
741--- a/arch/arm/kernel/armksyms.c
742+++ b/arch/arm/kernel/armksyms.c
743@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
744 #ifdef CONFIG_MMU
745 EXPORT_SYMBOL(copy_page);
746
747-EXPORT_SYMBOL(__copy_from_user);
748-EXPORT_SYMBOL(__copy_to_user);
749+EXPORT_SYMBOL(___copy_from_user);
750+EXPORT_SYMBOL(___copy_to_user);
751 EXPORT_SYMBOL(__clear_user);
752
753 EXPORT_SYMBOL(__get_user_1);
754diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
755index ba8ccfe..2dc34dc 100644
756--- a/arch/arm/kernel/kgdb.c
757+++ b/arch/arm/kernel/kgdb.c
758@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
759 * and we handle the normal undef case within the do_undefinstr
760 * handler.
761 */
762-struct kgdb_arch arch_kgdb_ops = {
763+const struct kgdb_arch arch_kgdb_ops = {
764 #ifndef __ARMEB__
765 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
766 #else /* ! __ARMEB__ */
767diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
768index 3f361a7..6e806e1 100644
769--- a/arch/arm/kernel/traps.c
770+++ b/arch/arm/kernel/traps.c
771@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
772
773 DEFINE_SPINLOCK(die_lock);
774
775+extern void gr_handle_kernel_exploit(void);
776+
777 /*
778 * This function is protected against re-entrancy.
779 */
780@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
781 if (panic_on_oops)
782 panic("Fatal exception");
783
784+ gr_handle_kernel_exploit();
785+
786 do_exit(SIGSEGV);
787 }
788
789diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
790index e4fe124..0fc246b 100644
791--- a/arch/arm/lib/copy_from_user.S
792+++ b/arch/arm/lib/copy_from_user.S
793@@ -16,7 +16,7 @@
794 /*
795 * Prototype:
796 *
797- * size_t __copy_from_user(void *to, const void *from, size_t n)
798+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
799 *
800 * Purpose:
801 *
802@@ -84,11 +84,11 @@
803
804 .text
805
806-ENTRY(__copy_from_user)
807+ENTRY(___copy_from_user)
808
809 #include "copy_template.S"
810
811-ENDPROC(__copy_from_user)
812+ENDPROC(___copy_from_user)
813
814 .section .fixup,"ax"
815 .align 0
816diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
817index 1a71e15..ac7b258 100644
818--- a/arch/arm/lib/copy_to_user.S
819+++ b/arch/arm/lib/copy_to_user.S
820@@ -16,7 +16,7 @@
821 /*
822 * Prototype:
823 *
824- * size_t __copy_to_user(void *to, const void *from, size_t n)
825+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
826 *
827 * Purpose:
828 *
829@@ -88,11 +88,11 @@
830 .text
831
832 ENTRY(__copy_to_user_std)
833-WEAK(__copy_to_user)
834+WEAK(___copy_to_user)
835
836 #include "copy_template.S"
837
838-ENDPROC(__copy_to_user)
839+ENDPROC(___copy_to_user)
840
841 .section .fixup,"ax"
842 .align 0
843diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
844index ffdd274..91017b6 100644
845--- a/arch/arm/lib/uaccess.S
846+++ b/arch/arm/lib/uaccess.S
847@@ -19,7 +19,7 @@
848
849 #define PAGE_SHIFT 12
850
851-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
852+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
853 * Purpose : copy a block to user memory from kernel memory
854 * Params : to - user memory
855 * : from - kernel memory
856@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
857 sub r2, r2, ip
858 b .Lc2u_dest_aligned
859
860-ENTRY(__copy_to_user)
861+ENTRY(___copy_to_user)
862 stmfd sp!, {r2, r4 - r7, lr}
863 cmp r2, #4
864 blt .Lc2u_not_enough
865@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
866 ldrgtb r3, [r1], #0
867 USER( strgtbt r3, [r0], #1) @ May fault
868 b .Lc2u_finished
869-ENDPROC(__copy_to_user)
870+ENDPROC(___copy_to_user)
871
872 .section .fixup,"ax"
873 .align 0
874 9001: ldmfd sp!, {r0, r4 - r7, pc}
875 .previous
876
877-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
878+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
879 * Purpose : copy a block from user memory to kernel memory
880 * Params : to - kernel memory
881 * : from - user memory
882@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
883 sub r2, r2, ip
884 b .Lcfu_dest_aligned
885
886-ENTRY(__copy_from_user)
887+ENTRY(___copy_from_user)
888 stmfd sp!, {r0, r2, r4 - r7, lr}
889 cmp r2, #4
890 blt .Lcfu_not_enough
891@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
892 USER( ldrgtbt r3, [r1], #1) @ May fault
893 strgtb r3, [r0], #1
894 b .Lcfu_finished
895-ENDPROC(__copy_from_user)
896+ENDPROC(___copy_from_user)
897
898 .section .fixup,"ax"
899 .align 0
900diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
901index 6b967ff..67d5b2b 100644
902--- a/arch/arm/lib/uaccess_with_memcpy.c
903+++ b/arch/arm/lib/uaccess_with_memcpy.c
904@@ -97,7 +97,7 @@ out:
905 }
906
907 unsigned long
908-__copy_to_user(void __user *to, const void *from, unsigned long n)
909+___copy_to_user(void __user *to, const void *from, unsigned long n)
910 {
911 /*
912 * This test is stubbed out of the main function above to keep
913diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
914index 4028724..beec230 100644
915--- a/arch/arm/mach-at91/pm.c
916+++ b/arch/arm/mach-at91/pm.c
917@@ -348,7 +348,7 @@ static void at91_pm_end(void)
918 }
919
920
921-static struct platform_suspend_ops at91_pm_ops ={
922+static const struct platform_suspend_ops at91_pm_ops ={
923 .valid = at91_pm_valid_state,
924 .begin = at91_pm_begin,
925 .enter = at91_pm_enter,
926diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
927index 5218943..0a34552 100644
928--- a/arch/arm/mach-omap1/pm.c
929+++ b/arch/arm/mach-omap1/pm.c
930@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
931
932
933
934-static struct platform_suspend_ops omap_pm_ops ={
935+static const struct platform_suspend_ops omap_pm_ops ={
936 .prepare = omap_pm_prepare,
937 .enter = omap_pm_enter,
938 .finish = omap_pm_finish,
939diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
940index bff5c4e..d4c649b 100644
941--- a/arch/arm/mach-omap2/pm24xx.c
942+++ b/arch/arm/mach-omap2/pm24xx.c
943@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
944 enable_hlt();
945 }
946
947-static struct platform_suspend_ops omap_pm_ops = {
948+static const struct platform_suspend_ops omap_pm_ops = {
949 .prepare = omap2_pm_prepare,
950 .enter = omap2_pm_enter,
951 .finish = omap2_pm_finish,
952diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
953index 8946319..7d3e661 100644
954--- a/arch/arm/mach-omap2/pm34xx.c
955+++ b/arch/arm/mach-omap2/pm34xx.c
956@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
957 return;
958 }
959
960-static struct platform_suspend_ops omap_pm_ops = {
961+static const struct platform_suspend_ops omap_pm_ops = {
962 .begin = omap3_pm_begin,
963 .end = omap3_pm_end,
964 .prepare = omap3_pm_prepare,
965diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
966index b3d8d53..6e68ebc 100644
967--- a/arch/arm/mach-pnx4008/pm.c
968+++ b/arch/arm/mach-pnx4008/pm.c
969@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
970 (state == PM_SUSPEND_MEM);
971 }
972
973-static struct platform_suspend_ops pnx4008_pm_ops = {
974+static const struct platform_suspend_ops pnx4008_pm_ops = {
975 .enter = pnx4008_pm_enter,
976 .valid = pnx4008_pm_valid,
977 };
978diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
979index 7693355..9beb00a 100644
980--- a/arch/arm/mach-pxa/pm.c
981+++ b/arch/arm/mach-pxa/pm.c
982@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
983 pxa_cpu_pm_fns->finish();
984 }
985
986-static struct platform_suspend_ops pxa_pm_ops = {
987+static const struct platform_suspend_ops pxa_pm_ops = {
988 .valid = pxa_pm_valid,
989 .enter = pxa_pm_enter,
990 .prepare = pxa_pm_prepare,
991diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
992index 629e05d..06be589 100644
993--- a/arch/arm/mach-pxa/sharpsl_pm.c
994+++ b/arch/arm/mach-pxa/sharpsl_pm.c
995@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
996 }
997
998 #ifdef CONFIG_PM
999-static struct platform_suspend_ops sharpsl_pm_ops = {
1000+static const struct platform_suspend_ops sharpsl_pm_ops = {
1001 .prepare = pxa_pm_prepare,
1002 .finish = pxa_pm_finish,
1003 .enter = corgi_pxa_pm_enter,
1004diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1005index c83fdc8..ab9fc44 100644
1006--- a/arch/arm/mach-sa1100/pm.c
1007+++ b/arch/arm/mach-sa1100/pm.c
1008@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1009 return virt_to_phys(sp);
1010 }
1011
1012-static struct platform_suspend_ops sa11x0_pm_ops = {
1013+static const struct platform_suspend_ops sa11x0_pm_ops = {
1014 .enter = sa11x0_pm_enter,
1015 .valid = suspend_valid_only_mem,
1016 };
1017diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1018index 3191cd6..c0739db 100644
1019--- a/arch/arm/mm/fault.c
1020+++ b/arch/arm/mm/fault.c
1021@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1022 }
1023 #endif
1024
1025+#ifdef CONFIG_PAX_PAGEEXEC
1026+ if (fsr & FSR_LNX_PF) {
1027+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1028+ do_group_exit(SIGKILL);
1029+ }
1030+#endif
1031+
1032 tsk->thread.address = addr;
1033 tsk->thread.error_code = fsr;
1034 tsk->thread.trap_no = 14;
1035@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1036 }
1037 #endif /* CONFIG_MMU */
1038
1039+#ifdef CONFIG_PAX_PAGEEXEC
1040+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1041+{
1042+ long i;
1043+
1044+ printk(KERN_ERR "PAX: bytes at PC: ");
1045+ for (i = 0; i < 20; i++) {
1046+ unsigned char c;
1047+ if (get_user(c, (__force unsigned char __user *)pc+i))
1048+ printk(KERN_CONT "?? ");
1049+ else
1050+ printk(KERN_CONT "%02x ", c);
1051+ }
1052+ printk("\n");
1053+
1054+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1055+ for (i = -1; i < 20; i++) {
1056+ unsigned long c;
1057+ if (get_user(c, (__force unsigned long __user *)sp+i))
1058+ printk(KERN_CONT "???????? ");
1059+ else
1060+ printk(KERN_CONT "%08lx ", c);
1061+ }
1062+ printk("\n");
1063+}
1064+#endif
1065+
1066 /*
1067 * First Level Translation Fault Handler
1068 *
1069diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1070index f5abc51..7ec524c 100644
1071--- a/arch/arm/mm/mmap.c
1072+++ b/arch/arm/mm/mmap.c
1073@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1074 if (len > TASK_SIZE)
1075 return -ENOMEM;
1076
1077+#ifdef CONFIG_PAX_RANDMMAP
1078+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1079+#endif
1080+
1081 if (addr) {
1082 if (do_align)
1083 addr = COLOUR_ALIGN(addr, pgoff);
1084@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1085 addr = PAGE_ALIGN(addr);
1086
1087 vma = find_vma(mm, addr);
1088- if (TASK_SIZE - len >= addr &&
1089- (!vma || addr + len <= vma->vm_start))
1090+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1091 return addr;
1092 }
1093 if (len > mm->cached_hole_size) {
1094- start_addr = addr = mm->free_area_cache;
1095+ start_addr = addr = mm->free_area_cache;
1096 } else {
1097- start_addr = addr = TASK_UNMAPPED_BASE;
1098- mm->cached_hole_size = 0;
1099+ start_addr = addr = mm->mmap_base;
1100+ mm->cached_hole_size = 0;
1101 }
1102
1103 full_search:
1104@@ -94,14 +97,14 @@ full_search:
1105 * Start a new search - just in case we missed
1106 * some holes.
1107 */
1108- if (start_addr != TASK_UNMAPPED_BASE) {
1109- start_addr = addr = TASK_UNMAPPED_BASE;
1110+ if (start_addr != mm->mmap_base) {
1111+ start_addr = addr = mm->mmap_base;
1112 mm->cached_hole_size = 0;
1113 goto full_search;
1114 }
1115 return -ENOMEM;
1116 }
1117- if (!vma || addr + len <= vma->vm_start) {
1118+ if (check_heap_stack_gap(vma, addr, len)) {
1119 /*
1120 * Remember the place where we stopped the search:
1121 */
1122diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1123index 8d97db2..b66cfa5 100644
1124--- a/arch/arm/plat-s3c/pm.c
1125+++ b/arch/arm/plat-s3c/pm.c
1126@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1127 s3c_pm_check_cleanup();
1128 }
1129
1130-static struct platform_suspend_ops s3c_pm_ops = {
1131+static const struct platform_suspend_ops s3c_pm_ops = {
1132 .enter = s3c_pm_enter,
1133 .prepare = s3c_pm_prepare,
1134 .finish = s3c_pm_finish,
1135diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1136index d5d1d41..856e2ed 100644
1137--- a/arch/avr32/include/asm/elf.h
1138+++ b/arch/avr32/include/asm/elf.h
1139@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1140 the loader. We need to make sure that it is out of the way of the program
1141 that it will "exec", and that there is sufficient room for the brk. */
1142
1143-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1144+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1145
1146+#ifdef CONFIG_PAX_ASLR
1147+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1148+
1149+#define PAX_DELTA_MMAP_LEN 15
1150+#define PAX_DELTA_STACK_LEN 15
1151+#endif
1152
1153 /* This yields a mask that user programs can use to figure out what
1154 instruction set this CPU supports. This could be done in user space,
1155diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1156index b7f5c68..556135c 100644
1157--- a/arch/avr32/include/asm/kmap_types.h
1158+++ b/arch/avr32/include/asm/kmap_types.h
1159@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1160 D(11) KM_IRQ1,
1161 D(12) KM_SOFTIRQ0,
1162 D(13) KM_SOFTIRQ1,
1163-D(14) KM_TYPE_NR
1164+D(14) KM_CLEARPAGE,
1165+D(15) KM_TYPE_NR
1166 };
1167
1168 #undef D
1169diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1170index f021edf..32d680e 100644
1171--- a/arch/avr32/mach-at32ap/pm.c
1172+++ b/arch/avr32/mach-at32ap/pm.c
1173@@ -176,7 +176,7 @@ out:
1174 return 0;
1175 }
1176
1177-static struct platform_suspend_ops avr32_pm_ops = {
1178+static const struct platform_suspend_ops avr32_pm_ops = {
1179 .valid = avr32_pm_valid_state,
1180 .enter = avr32_pm_enter,
1181 };
1182diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1183index b61d86d..e292c7f 100644
1184--- a/arch/avr32/mm/fault.c
1185+++ b/arch/avr32/mm/fault.c
1186@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1187
1188 int exception_trace = 1;
1189
1190+#ifdef CONFIG_PAX_PAGEEXEC
1191+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1192+{
1193+ unsigned long i;
1194+
1195+ printk(KERN_ERR "PAX: bytes at PC: ");
1196+ for (i = 0; i < 20; i++) {
1197+ unsigned char c;
1198+ if (get_user(c, (unsigned char *)pc+i))
1199+ printk(KERN_CONT "???????? ");
1200+ else
1201+ printk(KERN_CONT "%02x ", c);
1202+ }
1203+ printk("\n");
1204+}
1205+#endif
1206+
1207 /*
1208 * This routine handles page faults. It determines the address and the
1209 * problem, and then passes it off to one of the appropriate routines.
1210@@ -157,6 +174,16 @@ bad_area:
1211 up_read(&mm->mmap_sem);
1212
1213 if (user_mode(regs)) {
1214+
1215+#ifdef CONFIG_PAX_PAGEEXEC
1216+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1217+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1218+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1219+ do_group_exit(SIGKILL);
1220+ }
1221+ }
1222+#endif
1223+
1224 if (exception_trace && printk_ratelimit())
1225 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1226 "sp %08lx ecr %lu\n",
1227diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1228index cce79d0..c406c85 100644
1229--- a/arch/blackfin/kernel/kgdb.c
1230+++ b/arch/blackfin/kernel/kgdb.c
1231@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1232 return -1; /* this means that we do not want to exit from the handler */
1233 }
1234
1235-struct kgdb_arch arch_kgdb_ops = {
1236+const struct kgdb_arch arch_kgdb_ops = {
1237 .gdb_bpt_instr = {0xa1},
1238 #ifdef CONFIG_SMP
1239 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1240diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1241index 8837be4..b2fb413 100644
1242--- a/arch/blackfin/mach-common/pm.c
1243+++ b/arch/blackfin/mach-common/pm.c
1244@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1245 return 0;
1246 }
1247
1248-struct platform_suspend_ops bfin_pm_ops = {
1249+const struct platform_suspend_ops bfin_pm_ops = {
1250 .enter = bfin_pm_enter,
1251 .valid = bfin_pm_valid,
1252 };
1253diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1254index f8e16b2..c73ff79 100644
1255--- a/arch/frv/include/asm/kmap_types.h
1256+++ b/arch/frv/include/asm/kmap_types.h
1257@@ -23,6 +23,7 @@ enum km_type {
1258 KM_IRQ1,
1259 KM_SOFTIRQ0,
1260 KM_SOFTIRQ1,
1261+ KM_CLEARPAGE,
1262 KM_TYPE_NR
1263 };
1264
1265diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1266index 385fd30..6c3d97e 100644
1267--- a/arch/frv/mm/elf-fdpic.c
1268+++ b/arch/frv/mm/elf-fdpic.c
1269@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1270 if (addr) {
1271 addr = PAGE_ALIGN(addr);
1272 vma = find_vma(current->mm, addr);
1273- if (TASK_SIZE - len >= addr &&
1274- (!vma || addr + len <= vma->vm_start))
1275+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1276 goto success;
1277 }
1278
1279@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1280 for (; vma; vma = vma->vm_next) {
1281 if (addr > limit)
1282 break;
1283- if (addr + len <= vma->vm_start)
1284+ if (check_heap_stack_gap(vma, addr, len))
1285 goto success;
1286 addr = vma->vm_end;
1287 }
1288@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1289 for (; vma; vma = vma->vm_next) {
1290 if (addr > limit)
1291 break;
1292- if (addr + len <= vma->vm_start)
1293+ if (check_heap_stack_gap(vma, addr, len))
1294 goto success;
1295 addr = vma->vm_end;
1296 }
1297diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1298index e4a80d8..11a7ea1 100644
1299--- a/arch/ia64/hp/common/hwsw_iommu.c
1300+++ b/arch/ia64/hp/common/hwsw_iommu.c
1301@@ -17,7 +17,7 @@
1302 #include <linux/swiotlb.h>
1303 #include <asm/machvec.h>
1304
1305-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1306+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1307
1308 /* swiotlb declarations & definitions: */
1309 extern int swiotlb_late_init_with_default_size (size_t size);
1310@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1311 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1312 }
1313
1314-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1315+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1316 {
1317 if (use_swiotlb(dev))
1318 return &swiotlb_dma_ops;
1319diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1320index 01ae69b..35752fd 100644
1321--- a/arch/ia64/hp/common/sba_iommu.c
1322+++ b/arch/ia64/hp/common/sba_iommu.c
1323@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1324 },
1325 };
1326
1327-extern struct dma_map_ops swiotlb_dma_ops;
1328+extern const struct dma_map_ops swiotlb_dma_ops;
1329
1330 static int __init
1331 sba_init(void)
1332@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1333
1334 __setup("sbapagesize=",sba_page_override);
1335
1336-struct dma_map_ops sba_dma_ops = {
1337+const struct dma_map_ops sba_dma_ops = {
1338 .alloc_coherent = sba_alloc_coherent,
1339 .free_coherent = sba_free_coherent,
1340 .map_page = sba_map_page,
1341diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1342index c69552b..c7122f4 100644
1343--- a/arch/ia64/ia32/binfmt_elf32.c
1344+++ b/arch/ia64/ia32/binfmt_elf32.c
1345@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1346
1347 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1348
1349+#ifdef CONFIG_PAX_ASLR
1350+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1351+
1352+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1353+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1354+#endif
1355+
1356 /* Ugly but avoids duplication */
1357 #include "../../../fs/binfmt_elf.c"
1358
1359diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1360index 0f15349..26b3429 100644
1361--- a/arch/ia64/ia32/ia32priv.h
1362+++ b/arch/ia64/ia32/ia32priv.h
1363@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1364 #define ELF_DATA ELFDATA2LSB
1365 #define ELF_ARCH EM_386
1366
1367-#define IA32_STACK_TOP IA32_PAGE_OFFSET
1368+#ifdef CONFIG_PAX_RANDUSTACK
1369+#define __IA32_DELTA_STACK (current->mm->delta_stack)
1370+#else
1371+#define __IA32_DELTA_STACK 0UL
1372+#endif
1373+
1374+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1375+
1376 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1377 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1378
1379diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1380index 8d3c79c..71b3af6 100644
1381--- a/arch/ia64/include/asm/dma-mapping.h
1382+++ b/arch/ia64/include/asm/dma-mapping.h
1383@@ -12,7 +12,7 @@
1384
1385 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1386
1387-extern struct dma_map_ops *dma_ops;
1388+extern const struct dma_map_ops *dma_ops;
1389 extern struct ia64_machine_vector ia64_mv;
1390 extern void set_iommu_machvec(void);
1391
1392@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1393 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1394 dma_addr_t *daddr, gfp_t gfp)
1395 {
1396- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1397+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1398 void *caddr;
1399
1400 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1401@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1402 static inline void dma_free_coherent(struct device *dev, size_t size,
1403 void *caddr, dma_addr_t daddr)
1404 {
1405- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1406+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1407 debug_dma_free_coherent(dev, size, caddr, daddr);
1408 ops->free_coherent(dev, size, caddr, daddr);
1409 }
1410@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1411
1412 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1413 {
1414- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1415+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1416 return ops->mapping_error(dev, daddr);
1417 }
1418
1419 static inline int dma_supported(struct device *dev, u64 mask)
1420 {
1421- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1422+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1423 return ops->dma_supported(dev, mask);
1424 }
1425
1426diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1427index 86eddee..b116bb4 100644
1428--- a/arch/ia64/include/asm/elf.h
1429+++ b/arch/ia64/include/asm/elf.h
1430@@ -43,6 +43,13 @@
1431 */
1432 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1433
1434+#ifdef CONFIG_PAX_ASLR
1435+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1436+
1437+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1438+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1439+#endif
1440+
1441 #define PT_IA_64_UNWIND 0x70000001
1442
1443 /* IA-64 relocations: */
1444diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1445index 367d299..9ad4279 100644
1446--- a/arch/ia64/include/asm/machvec.h
1447+++ b/arch/ia64/include/asm/machvec.h
1448@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1449 /* DMA-mapping interface: */
1450 typedef void ia64_mv_dma_init (void);
1451 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1452-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1453+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1454
1455 /*
1456 * WARNING: The legacy I/O space is _architected_. Platforms are
1457@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1458 # endif /* CONFIG_IA64_GENERIC */
1459
1460 extern void swiotlb_dma_init(void);
1461-extern struct dma_map_ops *dma_get_ops(struct device *);
1462+extern const struct dma_map_ops *dma_get_ops(struct device *);
1463
1464 /*
1465 * Define default versions so we can extend machvec for new platforms without having
1466diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1467index 8840a69..cdb63d9 100644
1468--- a/arch/ia64/include/asm/pgtable.h
1469+++ b/arch/ia64/include/asm/pgtable.h
1470@@ -12,7 +12,7 @@
1471 * David Mosberger-Tang <davidm@hpl.hp.com>
1472 */
1473
1474-
1475+#include <linux/const.h>
1476 #include <asm/mman.h>
1477 #include <asm/page.h>
1478 #include <asm/processor.h>
1479@@ -143,6 +143,17 @@
1480 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1481 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1482 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1483+
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1486+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1487+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1488+#else
1489+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1490+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1491+# define PAGE_COPY_NOEXEC PAGE_COPY
1492+#endif
1493+
1494 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1495 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1496 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1497diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1498index 239ecdc..f94170e 100644
1499--- a/arch/ia64/include/asm/spinlock.h
1500+++ b/arch/ia64/include/asm/spinlock.h
1501@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1502 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1503
1504 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1505- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1506+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1507 }
1508
1509 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1510diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1511index 449c8c0..432a3d2 100644
1512--- a/arch/ia64/include/asm/uaccess.h
1513+++ b/arch/ia64/include/asm/uaccess.h
1514@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1515 const void *__cu_from = (from); \
1516 long __cu_len = (n); \
1517 \
1518- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1519+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1520 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1521 __cu_len; \
1522 })
1523@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1524 long __cu_len = (n); \
1525 \
1526 __chk_user_ptr(__cu_from); \
1527- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1528+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1529 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1530 __cu_len; \
1531 })
1532diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1533index f2c1600..969398a 100644
1534--- a/arch/ia64/kernel/dma-mapping.c
1535+++ b/arch/ia64/kernel/dma-mapping.c
1536@@ -3,7 +3,7 @@
1537 /* Set this to 1 if there is a HW IOMMU in the system */
1538 int iommu_detected __read_mostly;
1539
1540-struct dma_map_ops *dma_ops;
1541+const struct dma_map_ops *dma_ops;
1542 EXPORT_SYMBOL(dma_ops);
1543
1544 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1545@@ -16,7 +16,7 @@ static int __init dma_init(void)
1546 }
1547 fs_initcall(dma_init);
1548
1549-struct dma_map_ops *dma_get_ops(struct device *dev)
1550+const struct dma_map_ops *dma_get_ops(struct device *dev)
1551 {
1552 return dma_ops;
1553 }
1554diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1555index 1481b0a..e7d38ff 100644
1556--- a/arch/ia64/kernel/module.c
1557+++ b/arch/ia64/kernel/module.c
1558@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1559 void
1560 module_free (struct module *mod, void *module_region)
1561 {
1562- if (mod && mod->arch.init_unw_table &&
1563- module_region == mod->module_init) {
1564+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1565 unw_remove_unwind_table(mod->arch.init_unw_table);
1566 mod->arch.init_unw_table = NULL;
1567 }
1568@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1569 }
1570
1571 static inline int
1572+in_init_rx (const struct module *mod, uint64_t addr)
1573+{
1574+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1575+}
1576+
1577+static inline int
1578+in_init_rw (const struct module *mod, uint64_t addr)
1579+{
1580+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1581+}
1582+
1583+static inline int
1584 in_init (const struct module *mod, uint64_t addr)
1585 {
1586- return addr - (uint64_t) mod->module_init < mod->init_size;
1587+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1588+}
1589+
1590+static inline int
1591+in_core_rx (const struct module *mod, uint64_t addr)
1592+{
1593+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1594+}
1595+
1596+static inline int
1597+in_core_rw (const struct module *mod, uint64_t addr)
1598+{
1599+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1600 }
1601
1602 static inline int
1603 in_core (const struct module *mod, uint64_t addr)
1604 {
1605- return addr - (uint64_t) mod->module_core < mod->core_size;
1606+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1607 }
1608
1609 static inline int
1610@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1611 break;
1612
1613 case RV_BDREL:
1614- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1615+ if (in_init_rx(mod, val))
1616+ val -= (uint64_t) mod->module_init_rx;
1617+ else if (in_init_rw(mod, val))
1618+ val -= (uint64_t) mod->module_init_rw;
1619+ else if (in_core_rx(mod, val))
1620+ val -= (uint64_t) mod->module_core_rx;
1621+ else if (in_core_rw(mod, val))
1622+ val -= (uint64_t) mod->module_core_rw;
1623 break;
1624
1625 case RV_LTV:
1626@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1627 * addresses have been selected...
1628 */
1629 uint64_t gp;
1630- if (mod->core_size > MAX_LTOFF)
1631+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1632 /*
1633 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1634 * at the end of the module.
1635 */
1636- gp = mod->core_size - MAX_LTOFF / 2;
1637+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1638 else
1639- gp = mod->core_size / 2;
1640- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1641+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1642+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1643 mod->arch.gp = gp;
1644 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1645 }
1646diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1647index f6b1ff0..de773fb 100644
1648--- a/arch/ia64/kernel/pci-dma.c
1649+++ b/arch/ia64/kernel/pci-dma.c
1650@@ -43,7 +43,7 @@ struct device fallback_dev = {
1651 .dma_mask = &fallback_dev.coherent_dma_mask,
1652 };
1653
1654-extern struct dma_map_ops intel_dma_ops;
1655+extern const struct dma_map_ops intel_dma_ops;
1656
1657 static int __init pci_iommu_init(void)
1658 {
1659@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1660 }
1661 EXPORT_SYMBOL(iommu_dma_supported);
1662
1663+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1664+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1665+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1666+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1667+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1668+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1669+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1670+
1671+static const struct dma_map_ops intel_iommu_dma_ops = {
1672+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1673+ .alloc_coherent = intel_alloc_coherent,
1674+ .free_coherent = intel_free_coherent,
1675+ .map_sg = intel_map_sg,
1676+ .unmap_sg = intel_unmap_sg,
1677+ .map_page = intel_map_page,
1678+ .unmap_page = intel_unmap_page,
1679+ .mapping_error = intel_mapping_error,
1680+
1681+ .sync_single_for_cpu = machvec_dma_sync_single,
1682+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1683+ .sync_single_for_device = machvec_dma_sync_single,
1684+ .sync_sg_for_device = machvec_dma_sync_sg,
1685+ .dma_supported = iommu_dma_supported,
1686+};
1687+
1688 void __init pci_iommu_alloc(void)
1689 {
1690- dma_ops = &intel_dma_ops;
1691-
1692- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1693- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1694- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1695- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1696- dma_ops->dma_supported = iommu_dma_supported;
1697+ dma_ops = &intel_iommu_dma_ops;
1698
1699 /*
1700 * The order of these functions is important for
1701diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1702index 285aae8..61dbab6 100644
1703--- a/arch/ia64/kernel/pci-swiotlb.c
1704+++ b/arch/ia64/kernel/pci-swiotlb.c
1705@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1706 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1707 }
1708
1709-struct dma_map_ops swiotlb_dma_ops = {
1710+const struct dma_map_ops swiotlb_dma_ops = {
1711 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1712 .free_coherent = swiotlb_free_coherent,
1713 .map_page = swiotlb_map_page,
1714diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1715index 609d500..7dde2a8 100644
1716--- a/arch/ia64/kernel/sys_ia64.c
1717+++ b/arch/ia64/kernel/sys_ia64.c
1718@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1719 if (REGION_NUMBER(addr) == RGN_HPAGE)
1720 addr = 0;
1721 #endif
1722+
1723+#ifdef CONFIG_PAX_RANDMMAP
1724+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1725+ addr = mm->free_area_cache;
1726+ else
1727+#endif
1728+
1729 if (!addr)
1730 addr = mm->free_area_cache;
1731
1732@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1733 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1734 /* At this point: (!vma || addr < vma->vm_end). */
1735 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1736- if (start_addr != TASK_UNMAPPED_BASE) {
1737+ if (start_addr != mm->mmap_base) {
1738 /* Start a new search --- just in case we missed some holes. */
1739- addr = TASK_UNMAPPED_BASE;
1740+ addr = mm->mmap_base;
1741 goto full_search;
1742 }
1743 return -ENOMEM;
1744 }
1745- if (!vma || addr + len <= vma->vm_start) {
1746+ if (check_heap_stack_gap(vma, addr, len)) {
1747 /* Remember the address where we stopped this search: */
1748 mm->free_area_cache = addr + len;
1749 return addr;
1750diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1751index 8f06035..b3a5818 100644
1752--- a/arch/ia64/kernel/topology.c
1753+++ b/arch/ia64/kernel/topology.c
1754@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1755 return ret;
1756 }
1757
1758-static struct sysfs_ops cache_sysfs_ops = {
1759+static const struct sysfs_ops cache_sysfs_ops = {
1760 .show = cache_show
1761 };
1762
1763diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1764index 0a0c77b..8e55a81 100644
1765--- a/arch/ia64/kernel/vmlinux.lds.S
1766+++ b/arch/ia64/kernel/vmlinux.lds.S
1767@@ -190,7 +190,7 @@ SECTIONS
1768 /* Per-cpu data: */
1769 . = ALIGN(PERCPU_PAGE_SIZE);
1770 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1771- __phys_per_cpu_start = __per_cpu_load;
1772+ __phys_per_cpu_start = per_cpu_load;
1773 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1774 * into percpu page size
1775 */
1776diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1777index 19261a9..1611b7a 100644
1778--- a/arch/ia64/mm/fault.c
1779+++ b/arch/ia64/mm/fault.c
1780@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1781 return pte_present(pte);
1782 }
1783
1784+#ifdef CONFIG_PAX_PAGEEXEC
1785+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1786+{
1787+ unsigned long i;
1788+
1789+ printk(KERN_ERR "PAX: bytes at PC: ");
1790+ for (i = 0; i < 8; i++) {
1791+ unsigned int c;
1792+ if (get_user(c, (unsigned int *)pc+i))
1793+ printk(KERN_CONT "???????? ");
1794+ else
1795+ printk(KERN_CONT "%08x ", c);
1796+ }
1797+ printk("\n");
1798+}
1799+#endif
1800+
1801 void __kprobes
1802 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1803 {
1804@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1805 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1806 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1807
1808- if ((vma->vm_flags & mask) != mask)
1809+ if ((vma->vm_flags & mask) != mask) {
1810+
1811+#ifdef CONFIG_PAX_PAGEEXEC
1812+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1813+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1814+ goto bad_area;
1815+
1816+ up_read(&mm->mmap_sem);
1817+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1818+ do_group_exit(SIGKILL);
1819+ }
1820+#endif
1821+
1822 goto bad_area;
1823
1824+ }
1825+
1826 survive:
1827 /*
1828 * If for any reason at all we couldn't handle the fault, make
1829diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1830index b0f6157..a082bbc 100644
1831--- a/arch/ia64/mm/hugetlbpage.c
1832+++ b/arch/ia64/mm/hugetlbpage.c
1833@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1834 /* At this point: (!vmm || addr < vmm->vm_end). */
1835 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1836 return -ENOMEM;
1837- if (!vmm || (addr + len) <= vmm->vm_start)
1838+ if (check_heap_stack_gap(vmm, addr, len))
1839 return addr;
1840 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1841 }
1842diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1843index 1857766..05cc6a3 100644
1844--- a/arch/ia64/mm/init.c
1845+++ b/arch/ia64/mm/init.c
1846@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1847 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1848 vma->vm_end = vma->vm_start + PAGE_SIZE;
1849 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1850+
1851+#ifdef CONFIG_PAX_PAGEEXEC
1852+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1853+ vma->vm_flags &= ~VM_EXEC;
1854+
1855+#ifdef CONFIG_PAX_MPROTECT
1856+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1857+ vma->vm_flags &= ~VM_MAYEXEC;
1858+#endif
1859+
1860+ }
1861+#endif
1862+
1863 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1864 down_write(&current->mm->mmap_sem);
1865 if (insert_vm_struct(current->mm, vma)) {
1866diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1867index 98b6849..8046766 100644
1868--- a/arch/ia64/sn/pci/pci_dma.c
1869+++ b/arch/ia64/sn/pci/pci_dma.c
1870@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1871 return ret;
1872 }
1873
1874-static struct dma_map_ops sn_dma_ops = {
1875+static const struct dma_map_ops sn_dma_ops = {
1876 .alloc_coherent = sn_dma_alloc_coherent,
1877 .free_coherent = sn_dma_free_coherent,
1878 .map_page = sn_dma_map_page,
1879diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1880index 82abd15..d95ae5d 100644
1881--- a/arch/m32r/lib/usercopy.c
1882+++ b/arch/m32r/lib/usercopy.c
1883@@ -14,6 +14,9 @@
1884 unsigned long
1885 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1886 {
1887+ if ((long)n < 0)
1888+ return n;
1889+
1890 prefetch(from);
1891 if (access_ok(VERIFY_WRITE, to, n))
1892 __copy_user(to,from,n);
1893@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1894 unsigned long
1895 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1896 {
1897+ if ((long)n < 0)
1898+ return n;
1899+
1900 prefetchw(to);
1901 if (access_ok(VERIFY_READ, from, n))
1902 __copy_user_zeroing(to,from,n);
1903diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1904index 77f5021..2b1db8a 100644
1905--- a/arch/mips/Makefile
1906+++ b/arch/mips/Makefile
1907@@ -51,6 +51,8 @@ endif
1908 cflags-y := -ffunction-sections
1909 cflags-y += $(call cc-option, -mno-check-zero-division)
1910
1911+cflags-y += -Wno-sign-compare -Wno-extra
1912+
1913 ifdef CONFIG_32BIT
1914 ld-emul = $(32bit-emul)
1915 vmlinux-32 = vmlinux
1916diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1917index 632f986..fd0378d 100644
1918--- a/arch/mips/alchemy/devboards/pm.c
1919+++ b/arch/mips/alchemy/devboards/pm.c
1920@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1921
1922 }
1923
1924-static struct platform_suspend_ops db1x_pm_ops = {
1925+static const struct platform_suspend_ops db1x_pm_ops = {
1926 .valid = suspend_valid_only_mem,
1927 .begin = db1x_pm_begin,
1928 .enter = db1x_pm_enter,
1929diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1930index 7990694..4e93acf 100644
1931--- a/arch/mips/include/asm/elf.h
1932+++ b/arch/mips/include/asm/elf.h
1933@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1934 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1935 #endif
1936
1937+#ifdef CONFIG_PAX_ASLR
1938+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1939+
1940+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1941+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1942+#endif
1943+
1944 #endif /* _ASM_ELF_H */
1945diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1946index f266295..627cfff 100644
1947--- a/arch/mips/include/asm/page.h
1948+++ b/arch/mips/include/asm/page.h
1949@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1950 #ifdef CONFIG_CPU_MIPS32
1951 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1952 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1953- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1954+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1955 #else
1956 typedef struct { unsigned long long pte; } pte_t;
1957 #define pte_val(x) ((x).pte)
1958diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1959index e48c0bf..f3acf65 100644
1960--- a/arch/mips/include/asm/reboot.h
1961+++ b/arch/mips/include/asm/reboot.h
1962@@ -9,7 +9,7 @@
1963 #ifndef _ASM_REBOOT_H
1964 #define _ASM_REBOOT_H
1965
1966-extern void (*_machine_restart)(char *command);
1967-extern void (*_machine_halt)(void);
1968+extern void (*__noreturn _machine_restart)(char *command);
1969+extern void (*__noreturn _machine_halt)(void);
1970
1971 #endif /* _ASM_REBOOT_H */
1972diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1973index 83b5509..9fa24a23 100644
1974--- a/arch/mips/include/asm/system.h
1975+++ b/arch/mips/include/asm/system.h
1976@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1977 */
1978 #define __ARCH_WANT_UNLOCKED_CTXSW
1979
1980-extern unsigned long arch_align_stack(unsigned long sp);
1981+#define arch_align_stack(x) ((x) & ~0xfUL)
1982
1983 #endif /* _ASM_SYSTEM_H */
1984diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1985index 9fdd8bc..fcf9d68 100644
1986--- a/arch/mips/kernel/binfmt_elfn32.c
1987+++ b/arch/mips/kernel/binfmt_elfn32.c
1988@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1989 #undef ELF_ET_DYN_BASE
1990 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1991
1992+#ifdef CONFIG_PAX_ASLR
1993+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1994+
1995+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1996+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1997+#endif
1998+
1999 #include <asm/processor.h>
2000 #include <linux/module.h>
2001 #include <linux/elfcore.h>
2002diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2003index ff44823..cf0b48a 100644
2004--- a/arch/mips/kernel/binfmt_elfo32.c
2005+++ b/arch/mips/kernel/binfmt_elfo32.c
2006@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2007 #undef ELF_ET_DYN_BASE
2008 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2009
2010+#ifdef CONFIG_PAX_ASLR
2011+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2012+
2013+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2014+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2015+#endif
2016+
2017 #include <asm/processor.h>
2018
2019 /*
2020diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2021index 50c9bb8..efdd5f8 100644
2022--- a/arch/mips/kernel/kgdb.c
2023+++ b/arch/mips/kernel/kgdb.c
2024@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2025 return -1;
2026 }
2027
2028+/* cannot be const */
2029 struct kgdb_arch arch_kgdb_ops;
2030
2031 /*
2032diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2033index f3d73e1..bb3f57a 100644
2034--- a/arch/mips/kernel/process.c
2035+++ b/arch/mips/kernel/process.c
2036@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2037 out:
2038 return pc;
2039 }
2040-
2041-/*
2042- * Don't forget that the stack pointer must be aligned on a 8 bytes
2043- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2044- */
2045-unsigned long arch_align_stack(unsigned long sp)
2046-{
2047- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2048- sp -= get_random_int() & ~PAGE_MASK;
2049-
2050- return sp & ALMASK;
2051-}
2052diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2053index 060563a..7fbf310 100644
2054--- a/arch/mips/kernel/reset.c
2055+++ b/arch/mips/kernel/reset.c
2056@@ -19,8 +19,8 @@
2057 * So handle all using function pointers to machine specific
2058 * functions.
2059 */
2060-void (*_machine_restart)(char *command);
2061-void (*_machine_halt)(void);
2062+void (*__noreturn _machine_restart)(char *command);
2063+void (*__noreturn _machine_halt)(void);
2064 void (*pm_power_off)(void);
2065
2066 EXPORT_SYMBOL(pm_power_off);
2067@@ -29,16 +29,19 @@ void machine_restart(char *command)
2068 {
2069 if (_machine_restart)
2070 _machine_restart(command);
2071+ BUG();
2072 }
2073
2074 void machine_halt(void)
2075 {
2076 if (_machine_halt)
2077 _machine_halt();
2078+ BUG();
2079 }
2080
2081 void machine_power_off(void)
2082 {
2083 if (pm_power_off)
2084 pm_power_off();
2085+ BUG();
2086 }
2087diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2088index 3f7f466..3abe0b5 100644
2089--- a/arch/mips/kernel/syscall.c
2090+++ b/arch/mips/kernel/syscall.c
2091@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2092 do_color_align = 0;
2093 if (filp || (flags & MAP_SHARED))
2094 do_color_align = 1;
2095+
2096+#ifdef CONFIG_PAX_RANDMMAP
2097+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2098+#endif
2099+
2100 if (addr) {
2101 if (do_color_align)
2102 addr = COLOUR_ALIGN(addr, pgoff);
2103 else
2104 addr = PAGE_ALIGN(addr);
2105 vmm = find_vma(current->mm, addr);
2106- if (task_size - len >= addr &&
2107- (!vmm || addr + len <= vmm->vm_start))
2108+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2109 return addr;
2110 }
2111- addr = TASK_UNMAPPED_BASE;
2112+ addr = current->mm->mmap_base;
2113 if (do_color_align)
2114 addr = COLOUR_ALIGN(addr, pgoff);
2115 else
2116@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2117 /* At this point: (!vmm || addr < vmm->vm_end). */
2118 if (task_size - len < addr)
2119 return -ENOMEM;
2120- if (!vmm || addr + len <= vmm->vm_start)
2121+ if (check_heap_stack_gap(vmm, addr, len))
2122 return addr;
2123 addr = vmm->vm_end;
2124 if (do_color_align)
2125diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2126index e97a7a2..f18f5b0 100644
2127--- a/arch/mips/mm/fault.c
2128+++ b/arch/mips/mm/fault.c
2129@@ -26,6 +26,23 @@
2130 #include <asm/ptrace.h>
2131 #include <asm/highmem.h> /* For VMALLOC_END */
2132
2133+#ifdef CONFIG_PAX_PAGEEXEC
2134+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2135+{
2136+ unsigned long i;
2137+
2138+ printk(KERN_ERR "PAX: bytes at PC: ");
2139+ for (i = 0; i < 5; i++) {
2140+ unsigned int c;
2141+ if (get_user(c, (unsigned int *)pc+i))
2142+ printk(KERN_CONT "???????? ");
2143+ else
2144+ printk(KERN_CONT "%08x ", c);
2145+ }
2146+ printk("\n");
2147+}
2148+#endif
2149+
2150 /*
2151 * This routine handles page faults. It determines the address,
2152 * and the problem, and then passes it off to one of the appropriate
2153diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2154index 9c802eb..0592e41 100644
2155--- a/arch/parisc/include/asm/elf.h
2156+++ b/arch/parisc/include/asm/elf.h
2157@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2158
2159 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2160
2161+#ifdef CONFIG_PAX_ASLR
2162+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2163+
2164+#define PAX_DELTA_MMAP_LEN 16
2165+#define PAX_DELTA_STACK_LEN 16
2166+#endif
2167+
2168 /* This yields a mask that user programs can use to figure out what
2169 instruction set this CPU supports. This could be done in user space,
2170 but it's not easy, and we've already done it here. */
2171diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2172index a27d2e2..18fd845 100644
2173--- a/arch/parisc/include/asm/pgtable.h
2174+++ b/arch/parisc/include/asm/pgtable.h
2175@@ -207,6 +207,17 @@
2176 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2177 #define PAGE_COPY PAGE_EXECREAD
2178 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2179+
2180+#ifdef CONFIG_PAX_PAGEEXEC
2181+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2182+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2183+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2184+#else
2185+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2186+# define PAGE_COPY_NOEXEC PAGE_COPY
2187+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2188+#endif
2189+
2190 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2191 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2192 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2193diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2194index 2120746..8d70a5e 100644
2195--- a/arch/parisc/kernel/module.c
2196+++ b/arch/parisc/kernel/module.c
2197@@ -95,16 +95,38 @@
2198
2199 /* three functions to determine where in the module core
2200 * or init pieces the location is */
2201+static inline int in_init_rx(struct module *me, void *loc)
2202+{
2203+ return (loc >= me->module_init_rx &&
2204+ loc < (me->module_init_rx + me->init_size_rx));
2205+}
2206+
2207+static inline int in_init_rw(struct module *me, void *loc)
2208+{
2209+ return (loc >= me->module_init_rw &&
2210+ loc < (me->module_init_rw + me->init_size_rw));
2211+}
2212+
2213 static inline int in_init(struct module *me, void *loc)
2214 {
2215- return (loc >= me->module_init &&
2216- loc <= (me->module_init + me->init_size));
2217+ return in_init_rx(me, loc) || in_init_rw(me, loc);
2218+}
2219+
2220+static inline int in_core_rx(struct module *me, void *loc)
2221+{
2222+ return (loc >= me->module_core_rx &&
2223+ loc < (me->module_core_rx + me->core_size_rx));
2224+}
2225+
2226+static inline int in_core_rw(struct module *me, void *loc)
2227+{
2228+ return (loc >= me->module_core_rw &&
2229+ loc < (me->module_core_rw + me->core_size_rw));
2230 }
2231
2232 static inline int in_core(struct module *me, void *loc)
2233 {
2234- return (loc >= me->module_core &&
2235- loc <= (me->module_core + me->core_size));
2236+ return in_core_rx(me, loc) || in_core_rw(me, loc);
2237 }
2238
2239 static inline int in_local(struct module *me, void *loc)
2240@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2241 }
2242
2243 /* align things a bit */
2244- me->core_size = ALIGN(me->core_size, 16);
2245- me->arch.got_offset = me->core_size;
2246- me->core_size += gots * sizeof(struct got_entry);
2247+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2248+ me->arch.got_offset = me->core_size_rw;
2249+ me->core_size_rw += gots * sizeof(struct got_entry);
2250
2251- me->core_size = ALIGN(me->core_size, 16);
2252- me->arch.fdesc_offset = me->core_size;
2253- me->core_size += fdescs * sizeof(Elf_Fdesc);
2254+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2255+ me->arch.fdesc_offset = me->core_size_rw;
2256+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2257
2258 me->arch.got_max = gots;
2259 me->arch.fdesc_max = fdescs;
2260@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2261
2262 BUG_ON(value == 0);
2263
2264- got = me->module_core + me->arch.got_offset;
2265+ got = me->module_core_rw + me->arch.got_offset;
2266 for (i = 0; got[i].addr; i++)
2267 if (got[i].addr == value)
2268 goto out;
2269@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2270 #ifdef CONFIG_64BIT
2271 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2272 {
2273- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2274+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2275
2276 if (!value) {
2277 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2278@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2279
2280 /* Create new one */
2281 fdesc->addr = value;
2282- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2283+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2284 return (Elf_Addr)fdesc;
2285 }
2286 #endif /* CONFIG_64BIT */
2287@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2288
2289 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2290 end = table + sechdrs[me->arch.unwind_section].sh_size;
2291- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2292+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2293
2294 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2295 me->arch.unwind_section, table, end, gp);
2296diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2297index 9147391..f3d949a 100644
2298--- a/arch/parisc/kernel/sys_parisc.c
2299+++ b/arch/parisc/kernel/sys_parisc.c
2300@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2301 /* At this point: (!vma || addr < vma->vm_end). */
2302 if (TASK_SIZE - len < addr)
2303 return -ENOMEM;
2304- if (!vma || addr + len <= vma->vm_start)
2305+ if (check_heap_stack_gap(vma, addr, len))
2306 return addr;
2307 addr = vma->vm_end;
2308 }
2309@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2310 /* At this point: (!vma || addr < vma->vm_end). */
2311 if (TASK_SIZE - len < addr)
2312 return -ENOMEM;
2313- if (!vma || addr + len <= vma->vm_start)
2314+ if (check_heap_stack_gap(vma, addr, len))
2315 return addr;
2316 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2317 if (addr < vma->vm_end) /* handle wraparound */
2318@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2319 if (flags & MAP_FIXED)
2320 return addr;
2321 if (!addr)
2322- addr = TASK_UNMAPPED_BASE;
2323+ addr = current->mm->mmap_base;
2324
2325 if (filp) {
2326 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2327diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2328index 8b58bf0..7afff03 100644
2329--- a/arch/parisc/kernel/traps.c
2330+++ b/arch/parisc/kernel/traps.c
2331@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2332
2333 down_read(&current->mm->mmap_sem);
2334 vma = find_vma(current->mm,regs->iaoq[0]);
2335- if (vma && (regs->iaoq[0] >= vma->vm_start)
2336- && (vma->vm_flags & VM_EXEC)) {
2337-
2338+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2339 fault_address = regs->iaoq[0];
2340 fault_space = regs->iasq[0];
2341
2342diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2343index c6afbfc..c5839f6 100644
2344--- a/arch/parisc/mm/fault.c
2345+++ b/arch/parisc/mm/fault.c
2346@@ -15,6 +15,7 @@
2347 #include <linux/sched.h>
2348 #include <linux/interrupt.h>
2349 #include <linux/module.h>
2350+#include <linux/unistd.h>
2351
2352 #include <asm/uaccess.h>
2353 #include <asm/traps.h>
2354@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2355 static unsigned long
2356 parisc_acctyp(unsigned long code, unsigned int inst)
2357 {
2358- if (code == 6 || code == 16)
2359+ if (code == 6 || code == 7 || code == 16)
2360 return VM_EXEC;
2361
2362 switch (inst & 0xf0000000) {
2363@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2364 }
2365 #endif
2366
2367+#ifdef CONFIG_PAX_PAGEEXEC
2368+/*
2369+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2370+ *
2371+ * returns 1 when task should be killed
2372+ * 2 when rt_sigreturn trampoline was detected
2373+ * 3 when unpatched PLT trampoline was detected
2374+ */
2375+static int pax_handle_fetch_fault(struct pt_regs *regs)
2376+{
2377+
2378+#ifdef CONFIG_PAX_EMUPLT
2379+ int err;
2380+
2381+ do { /* PaX: unpatched PLT emulation */
2382+ unsigned int bl, depwi;
2383+
2384+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2385+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2386+
2387+ if (err)
2388+ break;
2389+
2390+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2391+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2392+
2393+ err = get_user(ldw, (unsigned int *)addr);
2394+ err |= get_user(bv, (unsigned int *)(addr+4));
2395+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2396+
2397+ if (err)
2398+ break;
2399+
2400+ if (ldw == 0x0E801096U &&
2401+ bv == 0xEAC0C000U &&
2402+ ldw2 == 0x0E881095U)
2403+ {
2404+ unsigned int resolver, map;
2405+
2406+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2407+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2408+ if (err)
2409+ break;
2410+
2411+ regs->gr[20] = instruction_pointer(regs)+8;
2412+ regs->gr[21] = map;
2413+ regs->gr[22] = resolver;
2414+ regs->iaoq[0] = resolver | 3UL;
2415+ regs->iaoq[1] = regs->iaoq[0] + 4;
2416+ return 3;
2417+ }
2418+ }
2419+ } while (0);
2420+#endif
2421+
2422+#ifdef CONFIG_PAX_EMUTRAMP
2423+
2424+#ifndef CONFIG_PAX_EMUSIGRT
2425+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2426+ return 1;
2427+#endif
2428+
2429+ do { /* PaX: rt_sigreturn emulation */
2430+ unsigned int ldi1, ldi2, bel, nop;
2431+
2432+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2433+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2434+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2435+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2436+
2437+ if (err)
2438+ break;
2439+
2440+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2441+ ldi2 == 0x3414015AU &&
2442+ bel == 0xE4008200U &&
2443+ nop == 0x08000240U)
2444+ {
2445+ regs->gr[25] = (ldi1 & 2) >> 1;
2446+ regs->gr[20] = __NR_rt_sigreturn;
2447+ regs->gr[31] = regs->iaoq[1] + 16;
2448+ regs->sr[0] = regs->iasq[1];
2449+ regs->iaoq[0] = 0x100UL;
2450+ regs->iaoq[1] = regs->iaoq[0] + 4;
2451+ regs->iasq[0] = regs->sr[2];
2452+ regs->iasq[1] = regs->sr[2];
2453+ return 2;
2454+ }
2455+ } while (0);
2456+#endif
2457+
2458+ return 1;
2459+}
2460+
2461+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2462+{
2463+ unsigned long i;
2464+
2465+ printk(KERN_ERR "PAX: bytes at PC: ");
2466+ for (i = 0; i < 5; i++) {
2467+ unsigned int c;
2468+ if (get_user(c, (unsigned int *)pc+i))
2469+ printk(KERN_CONT "???????? ");
2470+ else
2471+ printk(KERN_CONT "%08x ", c);
2472+ }
2473+ printk("\n");
2474+}
2475+#endif
2476+
2477 int fixup_exception(struct pt_regs *regs)
2478 {
2479 const struct exception_table_entry *fix;
2480@@ -192,8 +303,33 @@ good_area:
2481
2482 acc_type = parisc_acctyp(code,regs->iir);
2483
2484- if ((vma->vm_flags & acc_type) != acc_type)
2485+ if ((vma->vm_flags & acc_type) != acc_type) {
2486+
2487+#ifdef CONFIG_PAX_PAGEEXEC
2488+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2489+ (address & ~3UL) == instruction_pointer(regs))
2490+ {
2491+ up_read(&mm->mmap_sem);
2492+ switch (pax_handle_fetch_fault(regs)) {
2493+
2494+#ifdef CONFIG_PAX_EMUPLT
2495+ case 3:
2496+ return;
2497+#endif
2498+
2499+#ifdef CONFIG_PAX_EMUTRAMP
2500+ case 2:
2501+ return;
2502+#endif
2503+
2504+ }
2505+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2506+ do_group_exit(SIGKILL);
2507+ }
2508+#endif
2509+
2510 goto bad_area;
2511+ }
2512
2513 /*
2514 * If for any reason at all we couldn't handle the fault, make
2515diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2516index c107b74..409dc0f 100644
2517--- a/arch/powerpc/Makefile
2518+++ b/arch/powerpc/Makefile
2519@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2520 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2521 CPP = $(CC) -E $(KBUILD_CFLAGS)
2522
2523+cflags-y += -Wno-sign-compare -Wno-extra
2524+
2525 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2526
2527 ifeq ($(CONFIG_PPC64),y)
2528diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2529index 6d94d27..50d4cad 100644
2530--- a/arch/powerpc/include/asm/device.h
2531+++ b/arch/powerpc/include/asm/device.h
2532@@ -14,7 +14,7 @@ struct dev_archdata {
2533 struct device_node *of_node;
2534
2535 /* DMA operations on that device */
2536- struct dma_map_ops *dma_ops;
2537+ const struct dma_map_ops *dma_ops;
2538
2539 /*
2540 * When an iommu is in use, dma_data is used as a ptr to the base of the
2541diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2542index e281dae..2b8a784 100644
2543--- a/arch/powerpc/include/asm/dma-mapping.h
2544+++ b/arch/powerpc/include/asm/dma-mapping.h
2545@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2546 #ifdef CONFIG_PPC64
2547 extern struct dma_map_ops dma_iommu_ops;
2548 #endif
2549-extern struct dma_map_ops dma_direct_ops;
2550+extern const struct dma_map_ops dma_direct_ops;
2551
2552-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2553+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2554 {
2555 /* We don't handle the NULL dev case for ISA for now. We could
2556 * do it via an out of line call but it is not needed for now. The
2557@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2558 return dev->archdata.dma_ops;
2559 }
2560
2561-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2562+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2563 {
2564 dev->archdata.dma_ops = ops;
2565 }
2566@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2567
2568 static inline int dma_supported(struct device *dev, u64 mask)
2569 {
2570- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2571+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2572
2573 if (unlikely(dma_ops == NULL))
2574 return 0;
2575@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2576
2577 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2578 {
2579- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2580+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2581
2582 if (unlikely(dma_ops == NULL))
2583 return -EIO;
2584@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2585 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2586 dma_addr_t *dma_handle, gfp_t flag)
2587 {
2588- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2589+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2590 void *cpu_addr;
2591
2592 BUG_ON(!dma_ops);
2593@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2594 static inline void dma_free_coherent(struct device *dev, size_t size,
2595 void *cpu_addr, dma_addr_t dma_handle)
2596 {
2597- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2598+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2599
2600 BUG_ON(!dma_ops);
2601
2602@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2603
2604 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2605 {
2606- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2607+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2608
2609 if (dma_ops->mapping_error)
2610 return dma_ops->mapping_error(dev, dma_addr);
2611diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2612index 5698502..5db093c 100644
2613--- a/arch/powerpc/include/asm/elf.h
2614+++ b/arch/powerpc/include/asm/elf.h
2615@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2616 the loader. We need to make sure that it is out of the way of the program
2617 that it will "exec", and that there is sufficient room for the brk. */
2618
2619-extern unsigned long randomize_et_dyn(unsigned long base);
2620-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2621+#define ELF_ET_DYN_BASE (0x20000000)
2622+
2623+#ifdef CONFIG_PAX_ASLR
2624+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2625+
2626+#ifdef __powerpc64__
2627+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2628+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2629+#else
2630+#define PAX_DELTA_MMAP_LEN 15
2631+#define PAX_DELTA_STACK_LEN 15
2632+#endif
2633+#endif
2634
2635 /*
2636 * Our registers are always unsigned longs, whether we're a 32 bit
2637@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2638 (0x7ff >> (PAGE_SHIFT - 12)) : \
2639 (0x3ffff >> (PAGE_SHIFT - 12)))
2640
2641-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2642-#define arch_randomize_brk arch_randomize_brk
2643-
2644 #endif /* __KERNEL__ */
2645
2646 /*
2647diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2648index edfc980..1766f59 100644
2649--- a/arch/powerpc/include/asm/iommu.h
2650+++ b/arch/powerpc/include/asm/iommu.h
2651@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2652 extern void iommu_init_early_dart(void);
2653 extern void iommu_init_early_pasemi(void);
2654
2655+/* dma-iommu.c */
2656+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2657+
2658 #ifdef CONFIG_PCI
2659 extern void pci_iommu_init(void);
2660 extern void pci_direct_iommu_init(void);
2661diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2662index 9163695..5a00112 100644
2663--- a/arch/powerpc/include/asm/kmap_types.h
2664+++ b/arch/powerpc/include/asm/kmap_types.h
2665@@ -26,6 +26,7 @@ enum km_type {
2666 KM_SOFTIRQ1,
2667 KM_PPC_SYNC_PAGE,
2668 KM_PPC_SYNC_ICACHE,
2669+ KM_CLEARPAGE,
2670 KM_TYPE_NR
2671 };
2672
2673diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2674index ff24254..fe45b21 100644
2675--- a/arch/powerpc/include/asm/page.h
2676+++ b/arch/powerpc/include/asm/page.h
2677@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2678 * and needs to be executable. This means the whole heap ends
2679 * up being executable.
2680 */
2681-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2682- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2683+#define VM_DATA_DEFAULT_FLAGS32 \
2684+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2685+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2686
2687 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2688 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2689@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2690 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2691 #endif
2692
2693+#define ktla_ktva(addr) (addr)
2694+#define ktva_ktla(addr) (addr)
2695+
2696 #ifndef __ASSEMBLY__
2697
2698 #undef STRICT_MM_TYPECHECKS
2699diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2700index 3f17b83..1f9e766 100644
2701--- a/arch/powerpc/include/asm/page_64.h
2702+++ b/arch/powerpc/include/asm/page_64.h
2703@@ -180,15 +180,18 @@ do { \
2704 * stack by default, so in the absense of a PT_GNU_STACK program header
2705 * we turn execute permission off.
2706 */
2707-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2708- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2709+#define VM_STACK_DEFAULT_FLAGS32 \
2710+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2711+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2712
2713 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2714 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2715
2716+#ifndef CONFIG_PAX_PAGEEXEC
2717 #define VM_STACK_DEFAULT_FLAGS \
2718 (test_thread_flag(TIF_32BIT) ? \
2719 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2720+#endif
2721
2722 #include <asm-generic/getorder.h>
2723
2724diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2725index b5ea626..4030822 100644
2726--- a/arch/powerpc/include/asm/pci.h
2727+++ b/arch/powerpc/include/asm/pci.h
2728@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2729 }
2730
2731 #ifdef CONFIG_PCI
2732-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2733-extern struct dma_map_ops *get_pci_dma_ops(void);
2734+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2735+extern const struct dma_map_ops *get_pci_dma_ops(void);
2736 #else /* CONFIG_PCI */
2737 #define set_pci_dma_ops(d)
2738 #define get_pci_dma_ops() NULL
2739diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2740index 2a5da06..d65bea2 100644
2741--- a/arch/powerpc/include/asm/pgtable.h
2742+++ b/arch/powerpc/include/asm/pgtable.h
2743@@ -2,6 +2,7 @@
2744 #define _ASM_POWERPC_PGTABLE_H
2745 #ifdef __KERNEL__
2746
2747+#include <linux/const.h>
2748 #ifndef __ASSEMBLY__
2749 #include <asm/processor.h> /* For TASK_SIZE */
2750 #include <asm/mmu.h>
2751diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2752index 4aad413..85d86bf 100644
2753--- a/arch/powerpc/include/asm/pte-hash32.h
2754+++ b/arch/powerpc/include/asm/pte-hash32.h
2755@@ -21,6 +21,7 @@
2756 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2757 #define _PAGE_USER 0x004 /* usermode access allowed */
2758 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2759+#define _PAGE_EXEC _PAGE_GUARDED
2760 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2761 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2762 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2763diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2764index 8c34149..78f425a 100644
2765--- a/arch/powerpc/include/asm/ptrace.h
2766+++ b/arch/powerpc/include/asm/ptrace.h
2767@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2768 } while(0)
2769
2770 struct task_struct;
2771-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2772+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2773 extern int ptrace_put_reg(struct task_struct *task, int regno,
2774 unsigned long data);
2775
2776diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2777index 32a7c30..be3a8bb 100644
2778--- a/arch/powerpc/include/asm/reg.h
2779+++ b/arch/powerpc/include/asm/reg.h
2780@@ -191,6 +191,7 @@
2781 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2782 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2783 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2784+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2785 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2786 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2787 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2788diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2789index 8979d4c..d2fd0d3 100644
2790--- a/arch/powerpc/include/asm/swiotlb.h
2791+++ b/arch/powerpc/include/asm/swiotlb.h
2792@@ -13,7 +13,7 @@
2793
2794 #include <linux/swiotlb.h>
2795
2796-extern struct dma_map_ops swiotlb_dma_ops;
2797+extern const struct dma_map_ops swiotlb_dma_ops;
2798
2799 static inline void dma_mark_clean(void *addr, size_t size) {}
2800
2801diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2802index 094a12a..877a60a 100644
2803--- a/arch/powerpc/include/asm/system.h
2804+++ b/arch/powerpc/include/asm/system.h
2805@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2806 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2807 #endif
2808
2809-extern unsigned long arch_align_stack(unsigned long sp);
2810+#define arch_align_stack(x) ((x) & ~0xfUL)
2811
2812 /* Used in very early kernel initialization. */
2813 extern unsigned long reloc_offset(void);
2814diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2815index bd0fb84..a42a14b 100644
2816--- a/arch/powerpc/include/asm/uaccess.h
2817+++ b/arch/powerpc/include/asm/uaccess.h
2818@@ -13,6 +13,8 @@
2819 #define VERIFY_READ 0
2820 #define VERIFY_WRITE 1
2821
2822+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2823+
2824 /*
2825 * The fs value determines whether argument validity checking should be
2826 * performed or not. If get_fs() == USER_DS, checking is performed, with
2827@@ -327,52 +329,6 @@ do { \
2828 extern unsigned long __copy_tofrom_user(void __user *to,
2829 const void __user *from, unsigned long size);
2830
2831-#ifndef __powerpc64__
2832-
2833-static inline unsigned long copy_from_user(void *to,
2834- const void __user *from, unsigned long n)
2835-{
2836- unsigned long over;
2837-
2838- if (access_ok(VERIFY_READ, from, n))
2839- return __copy_tofrom_user((__force void __user *)to, from, n);
2840- if ((unsigned long)from < TASK_SIZE) {
2841- over = (unsigned long)from + n - TASK_SIZE;
2842- return __copy_tofrom_user((__force void __user *)to, from,
2843- n - over) + over;
2844- }
2845- return n;
2846-}
2847-
2848-static inline unsigned long copy_to_user(void __user *to,
2849- const void *from, unsigned long n)
2850-{
2851- unsigned long over;
2852-
2853- if (access_ok(VERIFY_WRITE, to, n))
2854- return __copy_tofrom_user(to, (__force void __user *)from, n);
2855- if ((unsigned long)to < TASK_SIZE) {
2856- over = (unsigned long)to + n - TASK_SIZE;
2857- return __copy_tofrom_user(to, (__force void __user *)from,
2858- n - over) + over;
2859- }
2860- return n;
2861-}
2862-
2863-#else /* __powerpc64__ */
2864-
2865-#define __copy_in_user(to, from, size) \
2866- __copy_tofrom_user((to), (from), (size))
2867-
2868-extern unsigned long copy_from_user(void *to, const void __user *from,
2869- unsigned long n);
2870-extern unsigned long copy_to_user(void __user *to, const void *from,
2871- unsigned long n);
2872-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2873- unsigned long n);
2874-
2875-#endif /* __powerpc64__ */
2876-
2877 static inline unsigned long __copy_from_user_inatomic(void *to,
2878 const void __user *from, unsigned long n)
2879 {
2880@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2881 if (ret == 0)
2882 return 0;
2883 }
2884+
2885+ if (!__builtin_constant_p(n))
2886+ check_object_size(to, n, false);
2887+
2888 return __copy_tofrom_user((__force void __user *)to, from, n);
2889 }
2890
2891@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2892 if (ret == 0)
2893 return 0;
2894 }
2895+
2896+ if (!__builtin_constant_p(n))
2897+ check_object_size(from, n, true);
2898+
2899 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2900 }
2901
2902@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2903 return __copy_to_user_inatomic(to, from, size);
2904 }
2905
2906+#ifndef __powerpc64__
2907+
2908+static inline unsigned long __must_check copy_from_user(void *to,
2909+ const void __user *from, unsigned long n)
2910+{
2911+ unsigned long over;
2912+
2913+ if ((long)n < 0)
2914+ return n;
2915+
2916+ if (access_ok(VERIFY_READ, from, n)) {
2917+ if (!__builtin_constant_p(n))
2918+ check_object_size(to, n, false);
2919+ return __copy_tofrom_user((__force void __user *)to, from, n);
2920+ }
2921+ if ((unsigned long)from < TASK_SIZE) {
2922+ over = (unsigned long)from + n - TASK_SIZE;
2923+ if (!__builtin_constant_p(n - over))
2924+ check_object_size(to, n - over, false);
2925+ return __copy_tofrom_user((__force void __user *)to, from,
2926+ n - over) + over;
2927+ }
2928+ return n;
2929+}
2930+
2931+static inline unsigned long __must_check copy_to_user(void __user *to,
2932+ const void *from, unsigned long n)
2933+{
2934+ unsigned long over;
2935+
2936+ if ((long)n < 0)
2937+ return n;
2938+
2939+ if (access_ok(VERIFY_WRITE, to, n)) {
2940+ if (!__builtin_constant_p(n))
2941+ check_object_size(from, n, true);
2942+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2943+ }
2944+ if ((unsigned long)to < TASK_SIZE) {
2945+ over = (unsigned long)to + n - TASK_SIZE;
2946+ if (!__builtin_constant_p(n))
2947+ check_object_size(from, n - over, true);
2948+ return __copy_tofrom_user(to, (__force void __user *)from,
2949+ n - over) + over;
2950+ }
2951+ return n;
2952+}
2953+
2954+#else /* __powerpc64__ */
2955+
2956+#define __copy_in_user(to, from, size) \
2957+ __copy_tofrom_user((to), (from), (size))
2958+
2959+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2960+{
2961+ if ((long)n < 0 || n > INT_MAX)
2962+ return n;
2963+
2964+ if (!__builtin_constant_p(n))
2965+ check_object_size(to, n, false);
2966+
2967+ if (likely(access_ok(VERIFY_READ, from, n)))
2968+ n = __copy_from_user(to, from, n);
2969+ else
2970+ memset(to, 0, n);
2971+ return n;
2972+}
2973+
2974+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2975+{
2976+ if ((long)n < 0 || n > INT_MAX)
2977+ return n;
2978+
2979+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2980+ if (!__builtin_constant_p(n))
2981+ check_object_size(from, n, true);
2982+ n = __copy_to_user(to, from, n);
2983+ }
2984+ return n;
2985+}
2986+
2987+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2988+ unsigned long n);
2989+
2990+#endif /* __powerpc64__ */
2991+
2992 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2993
2994 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2995diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
2996index bb37b1d..01fe9ce 100644
2997--- a/arch/powerpc/kernel/cacheinfo.c
2998+++ b/arch/powerpc/kernel/cacheinfo.c
2999@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3000 &cache_assoc_attr,
3001 };
3002
3003-static struct sysfs_ops cache_index_ops = {
3004+static const struct sysfs_ops cache_index_ops = {
3005 .show = cache_index_show,
3006 };
3007
3008diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3009index 37771a5..648530c 100644
3010--- a/arch/powerpc/kernel/dma-iommu.c
3011+++ b/arch/powerpc/kernel/dma-iommu.c
3012@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3013 }
3014
3015 /* We support DMA to/from any memory page via the iommu */
3016-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3017+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3018 {
3019 struct iommu_table *tbl = get_iommu_table_base(dev);
3020
3021diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3022index e96cbbd..bdd6d41 100644
3023--- a/arch/powerpc/kernel/dma-swiotlb.c
3024+++ b/arch/powerpc/kernel/dma-swiotlb.c
3025@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3026 * map_page, and unmap_page on highmem, use normal dma_ops
3027 * for everything else.
3028 */
3029-struct dma_map_ops swiotlb_dma_ops = {
3030+const struct dma_map_ops swiotlb_dma_ops = {
3031 .alloc_coherent = dma_direct_alloc_coherent,
3032 .free_coherent = dma_direct_free_coherent,
3033 .map_sg = swiotlb_map_sg_attrs,
3034diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3035index 6215062..ebea59c 100644
3036--- a/arch/powerpc/kernel/dma.c
3037+++ b/arch/powerpc/kernel/dma.c
3038@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3039 }
3040 #endif
3041
3042-struct dma_map_ops dma_direct_ops = {
3043+const struct dma_map_ops dma_direct_ops = {
3044 .alloc_coherent = dma_direct_alloc_coherent,
3045 .free_coherent = dma_direct_free_coherent,
3046 .map_sg = dma_direct_map_sg,
3047diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3048index 24dcc0e..a300455 100644
3049--- a/arch/powerpc/kernel/exceptions-64e.S
3050+++ b/arch/powerpc/kernel/exceptions-64e.S
3051@@ -455,6 +455,7 @@ storage_fault_common:
3052 std r14,_DAR(r1)
3053 std r15,_DSISR(r1)
3054 addi r3,r1,STACK_FRAME_OVERHEAD
3055+ bl .save_nvgprs
3056 mr r4,r14
3057 mr r5,r15
3058 ld r14,PACA_EXGEN+EX_R14(r13)
3059@@ -464,8 +465,7 @@ storage_fault_common:
3060 cmpdi r3,0
3061 bne- 1f
3062 b .ret_from_except_lite
3063-1: bl .save_nvgprs
3064- mr r5,r3
3065+1: mr r5,r3
3066 addi r3,r1,STACK_FRAME_OVERHEAD
3067 ld r4,_DAR(r1)
3068 bl .bad_page_fault
3069diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3070index 1808876..9fd206a 100644
3071--- a/arch/powerpc/kernel/exceptions-64s.S
3072+++ b/arch/powerpc/kernel/exceptions-64s.S
3073@@ -818,10 +818,10 @@ handle_page_fault:
3074 11: ld r4,_DAR(r1)
3075 ld r5,_DSISR(r1)
3076 addi r3,r1,STACK_FRAME_OVERHEAD
3077+ bl .save_nvgprs
3078 bl .do_page_fault
3079 cmpdi r3,0
3080 beq+ 13f
3081- bl .save_nvgprs
3082 mr r5,r3
3083 addi r3,r1,STACK_FRAME_OVERHEAD
3084 lwz r4,_DAR(r1)
3085diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3086index a4c8b38..1b09ad9 100644
3087--- a/arch/powerpc/kernel/ibmebus.c
3088+++ b/arch/powerpc/kernel/ibmebus.c
3089@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3090 return 1;
3091 }
3092
3093-static struct dma_map_ops ibmebus_dma_ops = {
3094+static const struct dma_map_ops ibmebus_dma_ops = {
3095 .alloc_coherent = ibmebus_alloc_coherent,
3096 .free_coherent = ibmebus_free_coherent,
3097 .map_sg = ibmebus_map_sg,
3098diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3099index 641c74b..8339ad7 100644
3100--- a/arch/powerpc/kernel/kgdb.c
3101+++ b/arch/powerpc/kernel/kgdb.c
3102@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3103 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3104 return 0;
3105
3106- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3107+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3108 regs->nip += 4;
3109
3110 return 1;
3111@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3112 /*
3113 * Global data
3114 */
3115-struct kgdb_arch arch_kgdb_ops = {
3116+const struct kgdb_arch arch_kgdb_ops = {
3117 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3118 };
3119
3120diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3121index 477c663..4f50234 100644
3122--- a/arch/powerpc/kernel/module.c
3123+++ b/arch/powerpc/kernel/module.c
3124@@ -31,11 +31,24 @@
3125
3126 LIST_HEAD(module_bug_list);
3127
3128+#ifdef CONFIG_PAX_KERNEXEC
3129 void *module_alloc(unsigned long size)
3130 {
3131 if (size == 0)
3132 return NULL;
3133
3134+ return vmalloc(size);
3135+}
3136+
3137+void *module_alloc_exec(unsigned long size)
3138+#else
3139+void *module_alloc(unsigned long size)
3140+#endif
3141+
3142+{
3143+ if (size == 0)
3144+ return NULL;
3145+
3146 return vmalloc_exec(size);
3147 }
3148
3149@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3150 vfree(module_region);
3151 }
3152
3153+#ifdef CONFIG_PAX_KERNEXEC
3154+void module_free_exec(struct module *mod, void *module_region)
3155+{
3156+ module_free(mod, module_region);
3157+}
3158+#endif
3159+
3160 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3161 const Elf_Shdr *sechdrs,
3162 const char *name)
3163diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3164index f832773..0507238 100644
3165--- a/arch/powerpc/kernel/module_32.c
3166+++ b/arch/powerpc/kernel/module_32.c
3167@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3168 me->arch.core_plt_section = i;
3169 }
3170 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3171- printk("Module doesn't contain .plt or .init.plt sections.\n");
3172+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3173 return -ENOEXEC;
3174 }
3175
3176@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3177
3178 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3179 /* Init, or core PLT? */
3180- if (location >= mod->module_core
3181- && location < mod->module_core + mod->core_size)
3182+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3183+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3184 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3185- else
3186+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3187+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3188 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3189+ else {
3190+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3191+ return ~0UL;
3192+ }
3193
3194 /* Find this entry, or if that fails, the next avail. entry */
3195 while (entry->jump[0]) {
3196diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3197index cadbed6..b9bbb00 100644
3198--- a/arch/powerpc/kernel/pci-common.c
3199+++ b/arch/powerpc/kernel/pci-common.c
3200@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3201 unsigned int ppc_pci_flags = 0;
3202
3203
3204-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3205+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3206
3207-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3208+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3209 {
3210 pci_dma_ops = dma_ops;
3211 }
3212
3213-struct dma_map_ops *get_pci_dma_ops(void)
3214+const struct dma_map_ops *get_pci_dma_ops(void)
3215 {
3216 return pci_dma_ops;
3217 }
3218diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3219index 7b816da..8d5c277 100644
3220--- a/arch/powerpc/kernel/process.c
3221+++ b/arch/powerpc/kernel/process.c
3222@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3223 * Lookup NIP late so we have the best change of getting the
3224 * above info out without failing
3225 */
3226- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3227- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3228+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3229+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3230 #endif
3231 show_stack(current, (unsigned long *) regs->gpr[1]);
3232 if (!user_mode(regs))
3233@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3234 newsp = stack[0];
3235 ip = stack[STACK_FRAME_LR_SAVE];
3236 if (!firstframe || ip != lr) {
3237- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3238+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3239 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3240 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3241- printk(" (%pS)",
3242+ printk(" (%pA)",
3243 (void *)current->ret_stack[curr_frame].ret);
3244 curr_frame--;
3245 }
3246@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3247 struct pt_regs *regs = (struct pt_regs *)
3248 (sp + STACK_FRAME_OVERHEAD);
3249 lr = regs->link;
3250- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3251+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3252 regs->trap, (void *)regs->nip, (void *)lr);
3253 firstframe = 1;
3254 }
3255@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3256 }
3257
3258 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3259-
3260-unsigned long arch_align_stack(unsigned long sp)
3261-{
3262- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3263- sp -= get_random_int() & ~PAGE_MASK;
3264- return sp & ~0xf;
3265-}
3266-
3267-static inline unsigned long brk_rnd(void)
3268-{
3269- unsigned long rnd = 0;
3270-
3271- /* 8MB for 32bit, 1GB for 64bit */
3272- if (is_32bit_task())
3273- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3274- else
3275- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3276-
3277- return rnd << PAGE_SHIFT;
3278-}
3279-
3280-unsigned long arch_randomize_brk(struct mm_struct *mm)
3281-{
3282- unsigned long base = mm->brk;
3283- unsigned long ret;
3284-
3285-#ifdef CONFIG_PPC_STD_MMU_64
3286- /*
3287- * If we are using 1TB segments and we are allowed to randomise
3288- * the heap, we can put it above 1TB so it is backed by a 1TB
3289- * segment. Otherwise the heap will be in the bottom 1TB
3290- * which always uses 256MB segments and this may result in a
3291- * performance penalty.
3292- */
3293- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3294- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3295-#endif
3296-
3297- ret = PAGE_ALIGN(base + brk_rnd());
3298-
3299- if (ret < mm->brk)
3300- return mm->brk;
3301-
3302- return ret;
3303-}
3304-
3305-unsigned long randomize_et_dyn(unsigned long base)
3306-{
3307- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3308-
3309- if (ret < base)
3310- return base;
3311-
3312- return ret;
3313-}
3314diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3315index ef14988..856c4bc 100644
3316--- a/arch/powerpc/kernel/ptrace.c
3317+++ b/arch/powerpc/kernel/ptrace.c
3318@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3319 /*
3320 * Get contents of register REGNO in task TASK.
3321 */
3322-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3323+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3324 {
3325 if (task->thread.regs == NULL)
3326 return -EIO;
3327@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3328
3329 CHECK_FULL_REGS(child->thread.regs);
3330 if (index < PT_FPR0) {
3331- tmp = ptrace_get_reg(child, (int) index);
3332+ tmp = ptrace_get_reg(child, index);
3333 } else {
3334 flush_fp_to_thread(child);
3335 tmp = ((unsigned long *)child->thread.fpr)
3336diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3337index d670429..2bc59b2 100644
3338--- a/arch/powerpc/kernel/signal_32.c
3339+++ b/arch/powerpc/kernel/signal_32.c
3340@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3341 /* Save user registers on the stack */
3342 frame = &rt_sf->uc.uc_mcontext;
3343 addr = frame;
3344- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3345+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3346 if (save_user_regs(regs, frame, 0, 1))
3347 goto badframe;
3348 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3349diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3350index 2fe6fc6..ada0d96 100644
3351--- a/arch/powerpc/kernel/signal_64.c
3352+++ b/arch/powerpc/kernel/signal_64.c
3353@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3354 current->thread.fpscr.val = 0;
3355
3356 /* Set up to return from userspace. */
3357- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3358+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3359 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3360 } else {
3361 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3362diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3363index b97c2d6..dd01a6a 100644
3364--- a/arch/powerpc/kernel/sys_ppc32.c
3365+++ b/arch/powerpc/kernel/sys_ppc32.c
3366@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3367 if (oldlenp) {
3368 if (!error) {
3369 if (get_user(oldlen, oldlenp) ||
3370- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3371+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3372+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3373 error = -EFAULT;
3374 }
3375- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3376 }
3377 return error;
3378 }
3379diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3380index 6f0ae1a..e4b6a56 100644
3381--- a/arch/powerpc/kernel/traps.c
3382+++ b/arch/powerpc/kernel/traps.c
3383@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3384 static inline void pmac_backlight_unblank(void) { }
3385 #endif
3386
3387+extern void gr_handle_kernel_exploit(void);
3388+
3389 int die(const char *str, struct pt_regs *regs, long err)
3390 {
3391 static struct {
3392@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3393 if (panic_on_oops)
3394 panic("Fatal exception");
3395
3396+ gr_handle_kernel_exploit();
3397+
3398 oops_exit();
3399 do_exit(err);
3400
3401diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3402index 137dc22..fe57a79 100644
3403--- a/arch/powerpc/kernel/vdso.c
3404+++ b/arch/powerpc/kernel/vdso.c
3405@@ -36,6 +36,7 @@
3406 #include <asm/firmware.h>
3407 #include <asm/vdso.h>
3408 #include <asm/vdso_datapage.h>
3409+#include <asm/mman.h>
3410
3411 #include "setup.h"
3412
3413@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3414 vdso_base = VDSO32_MBASE;
3415 #endif
3416
3417- current->mm->context.vdso_base = 0;
3418+ current->mm->context.vdso_base = ~0UL;
3419
3420 /* vDSO has a problem and was disabled, just don't "enable" it for the
3421 * process
3422@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3423 vdso_base = get_unmapped_area(NULL, vdso_base,
3424 (vdso_pages << PAGE_SHIFT) +
3425 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3426- 0, 0);
3427+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3428 if (IS_ERR_VALUE(vdso_base)) {
3429 rc = vdso_base;
3430 goto fail_mmapsem;
3431diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3432index 77f6421..829564a 100644
3433--- a/arch/powerpc/kernel/vio.c
3434+++ b/arch/powerpc/kernel/vio.c
3435@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3436 vio_cmo_dealloc(viodev, alloc_size);
3437 }
3438
3439-struct dma_map_ops vio_dma_mapping_ops = {
3440+static const struct dma_map_ops vio_dma_mapping_ops = {
3441 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3442 .free_coherent = vio_dma_iommu_free_coherent,
3443 .map_sg = vio_dma_iommu_map_sg,
3444 .unmap_sg = vio_dma_iommu_unmap_sg,
3445+ .dma_supported = dma_iommu_dma_supported,
3446 .map_page = vio_dma_iommu_map_page,
3447 .unmap_page = vio_dma_iommu_unmap_page,
3448
3449@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3450
3451 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3452 {
3453- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3454 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3455 }
3456
3457diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3458index 5eea6f3..5d10396 100644
3459--- a/arch/powerpc/lib/usercopy_64.c
3460+++ b/arch/powerpc/lib/usercopy_64.c
3461@@ -9,22 +9,6 @@
3462 #include <linux/module.h>
3463 #include <asm/uaccess.h>
3464
3465-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3466-{
3467- if (likely(access_ok(VERIFY_READ, from, n)))
3468- n = __copy_from_user(to, from, n);
3469- else
3470- memset(to, 0, n);
3471- return n;
3472-}
3473-
3474-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3475-{
3476- if (likely(access_ok(VERIFY_WRITE, to, n)))
3477- n = __copy_to_user(to, from, n);
3478- return n;
3479-}
3480-
3481 unsigned long copy_in_user(void __user *to, const void __user *from,
3482 unsigned long n)
3483 {
3484@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3485 return n;
3486 }
3487
3488-EXPORT_SYMBOL(copy_from_user);
3489-EXPORT_SYMBOL(copy_to_user);
3490 EXPORT_SYMBOL(copy_in_user);
3491
3492diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3493index e7dae82..877ce0d 100644
3494--- a/arch/powerpc/mm/fault.c
3495+++ b/arch/powerpc/mm/fault.c
3496@@ -30,6 +30,10 @@
3497 #include <linux/kprobes.h>
3498 #include <linux/kdebug.h>
3499 #include <linux/perf_event.h>
3500+#include <linux/slab.h>
3501+#include <linux/pagemap.h>
3502+#include <linux/compiler.h>
3503+#include <linux/unistd.h>
3504
3505 #include <asm/firmware.h>
3506 #include <asm/page.h>
3507@@ -40,6 +44,7 @@
3508 #include <asm/uaccess.h>
3509 #include <asm/tlbflush.h>
3510 #include <asm/siginfo.h>
3511+#include <asm/ptrace.h>
3512
3513
3514 #ifdef CONFIG_KPROBES
3515@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3516 }
3517 #endif
3518
3519+#ifdef CONFIG_PAX_PAGEEXEC
3520+/*
3521+ * PaX: decide what to do with offenders (regs->nip = fault address)
3522+ *
3523+ * returns 1 when task should be killed
3524+ */
3525+static int pax_handle_fetch_fault(struct pt_regs *regs)
3526+{
3527+ return 1;
3528+}
3529+
3530+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3531+{
3532+ unsigned long i;
3533+
3534+ printk(KERN_ERR "PAX: bytes at PC: ");
3535+ for (i = 0; i < 5; i++) {
3536+ unsigned int c;
3537+ if (get_user(c, (unsigned int __user *)pc+i))
3538+ printk(KERN_CONT "???????? ");
3539+ else
3540+ printk(KERN_CONT "%08x ", c);
3541+ }
3542+ printk("\n");
3543+}
3544+#endif
3545+
3546 /*
3547 * Check whether the instruction at regs->nip is a store using
3548 * an update addressing form which will update r1.
3549@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3550 * indicate errors in DSISR but can validly be set in SRR1.
3551 */
3552 if (trap == 0x400)
3553- error_code &= 0x48200000;
3554+ error_code &= 0x58200000;
3555 else
3556 is_write = error_code & DSISR_ISSTORE;
3557 #else
3558@@ -250,7 +282,7 @@ good_area:
3559 * "undefined". Of those that can be set, this is the only
3560 * one which seems bad.
3561 */
3562- if (error_code & 0x10000000)
3563+ if (error_code & DSISR_GUARDED)
3564 /* Guarded storage error. */
3565 goto bad_area;
3566 #endif /* CONFIG_8xx */
3567@@ -265,7 +297,7 @@ good_area:
3568 * processors use the same I/D cache coherency mechanism
3569 * as embedded.
3570 */
3571- if (error_code & DSISR_PROTFAULT)
3572+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3573 goto bad_area;
3574 #endif /* CONFIG_PPC_STD_MMU */
3575
3576@@ -335,6 +367,23 @@ bad_area:
3577 bad_area_nosemaphore:
3578 /* User mode accesses cause a SIGSEGV */
3579 if (user_mode(regs)) {
3580+
3581+#ifdef CONFIG_PAX_PAGEEXEC
3582+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3583+#ifdef CONFIG_PPC_STD_MMU
3584+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3585+#else
3586+ if (is_exec && regs->nip == address) {
3587+#endif
3588+ switch (pax_handle_fetch_fault(regs)) {
3589+ }
3590+
3591+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3592+ do_group_exit(SIGKILL);
3593+ }
3594+ }
3595+#endif
3596+
3597 _exception(SIGSEGV, regs, code, address);
3598 return 0;
3599 }
3600diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3601index 5973631..ad617af 100644
3602--- a/arch/powerpc/mm/mem.c
3603+++ b/arch/powerpc/mm/mem.c
3604@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3605 {
3606 unsigned long lmb_next_region_start_pfn,
3607 lmb_region_max_pfn;
3608- int i;
3609+ unsigned int i;
3610
3611 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3612 lmb_region_max_pfn =
3613diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3614index 0d957a4..26d968f 100644
3615--- a/arch/powerpc/mm/mmap_64.c
3616+++ b/arch/powerpc/mm/mmap_64.c
3617@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3618 */
3619 if (mmap_is_legacy()) {
3620 mm->mmap_base = TASK_UNMAPPED_BASE;
3621+
3622+#ifdef CONFIG_PAX_RANDMMAP
3623+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3624+ mm->mmap_base += mm->delta_mmap;
3625+#endif
3626+
3627 mm->get_unmapped_area = arch_get_unmapped_area;
3628 mm->unmap_area = arch_unmap_area;
3629 } else {
3630 mm->mmap_base = mmap_base();
3631+
3632+#ifdef CONFIG_PAX_RANDMMAP
3633+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3634+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3635+#endif
3636+
3637 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3638 mm->unmap_area = arch_unmap_area_topdown;
3639 }
3640diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3641index ba51948..23009d9 100644
3642--- a/arch/powerpc/mm/slice.c
3643+++ b/arch/powerpc/mm/slice.c
3644@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3645 if ((mm->task_size - len) < addr)
3646 return 0;
3647 vma = find_vma(mm, addr);
3648- return (!vma || (addr + len) <= vma->vm_start);
3649+ return check_heap_stack_gap(vma, addr, len);
3650 }
3651
3652 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3653@@ -256,7 +256,7 @@ full_search:
3654 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3655 continue;
3656 }
3657- if (!vma || addr + len <= vma->vm_start) {
3658+ if (check_heap_stack_gap(vma, addr, len)) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3663 }
3664 }
3665
3666- addr = mm->mmap_base;
3667- while (addr > len) {
3668+ if (mm->mmap_base < len)
3669+ addr = -ENOMEM;
3670+ else
3671+ addr = mm->mmap_base - len;
3672+
3673+ while (!IS_ERR_VALUE(addr)) {
3674 /* Go down by chunk size */
3675- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3676+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3677
3678 /* Check for hit with different page size */
3679 mask = slice_range_to_mask(addr, len);
3680@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3681 * return with success:
3682 */
3683 vma = find_vma(mm, addr);
3684- if (!vma || (addr + len) <= vma->vm_start) {
3685+ if (check_heap_stack_gap(vma, addr, len)) {
3686 /* remember the address as a hint for next time */
3687 if (use_cache)
3688 mm->free_area_cache = addr;
3689@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3690 mm->cached_hole_size = vma->vm_start - addr;
3691
3692 /* try just below the current vma->vm_start */
3693- addr = vma->vm_start;
3694+ addr = skip_heap_stack_gap(vma, len);
3695 }
3696
3697 /*
3698@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3699 if (fixed && addr > (mm->task_size - len))
3700 return -EINVAL;
3701
3702+#ifdef CONFIG_PAX_RANDMMAP
3703+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3704+ addr = 0;
3705+#endif
3706+
3707 /* If hint, make sure it matches our alignment restrictions */
3708 if (!fixed && addr) {
3709 addr = _ALIGN_UP(addr, 1ul << pshift);
3710diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3711index b5c753d..8f01abe 100644
3712--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3713+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3714@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3715 lite5200_pm_target_state = PM_SUSPEND_ON;
3716 }
3717
3718-static struct platform_suspend_ops lite5200_pm_ops = {
3719+static const struct platform_suspend_ops lite5200_pm_ops = {
3720 .valid = lite5200_pm_valid,
3721 .begin = lite5200_pm_begin,
3722 .prepare = lite5200_pm_prepare,
3723diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3724index a55b0b6..478c18e 100644
3725--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3726+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3727@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3728 iounmap(mbar);
3729 }
3730
3731-static struct platform_suspend_ops mpc52xx_pm_ops = {
3732+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3733 .valid = mpc52xx_pm_valid,
3734 .prepare = mpc52xx_pm_prepare,
3735 .enter = mpc52xx_pm_enter,
3736diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3737index 08e65fc..643d3ac 100644
3738--- a/arch/powerpc/platforms/83xx/suspend.c
3739+++ b/arch/powerpc/platforms/83xx/suspend.c
3740@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3741 return ret;
3742 }
3743
3744-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3745+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3746 .valid = mpc83xx_suspend_valid,
3747 .begin = mpc83xx_suspend_begin,
3748 .enter = mpc83xx_suspend_enter,
3749diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3750index ca5bfdf..1602e09 100644
3751--- a/arch/powerpc/platforms/cell/iommu.c
3752+++ b/arch/powerpc/platforms/cell/iommu.c
3753@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3754
3755 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3756
3757-struct dma_map_ops dma_iommu_fixed_ops = {
3758+const struct dma_map_ops dma_iommu_fixed_ops = {
3759 .alloc_coherent = dma_fixed_alloc_coherent,
3760 .free_coherent = dma_fixed_free_coherent,
3761 .map_sg = dma_fixed_map_sg,
3762diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3763index e34b305..20e48ec 100644
3764--- a/arch/powerpc/platforms/ps3/system-bus.c
3765+++ b/arch/powerpc/platforms/ps3/system-bus.c
3766@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3767 return mask >= DMA_BIT_MASK(32);
3768 }
3769
3770-static struct dma_map_ops ps3_sb_dma_ops = {
3771+static const struct dma_map_ops ps3_sb_dma_ops = {
3772 .alloc_coherent = ps3_alloc_coherent,
3773 .free_coherent = ps3_free_coherent,
3774 .map_sg = ps3_sb_map_sg,
3775@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3776 .unmap_page = ps3_unmap_page,
3777 };
3778
3779-static struct dma_map_ops ps3_ioc0_dma_ops = {
3780+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3781 .alloc_coherent = ps3_alloc_coherent,
3782 .free_coherent = ps3_free_coherent,
3783 .map_sg = ps3_ioc0_map_sg,
3784diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3785index f0e6f28..60d53ed 100644
3786--- a/arch/powerpc/platforms/pseries/Kconfig
3787+++ b/arch/powerpc/platforms/pseries/Kconfig
3788@@ -2,6 +2,8 @@ config PPC_PSERIES
3789 depends on PPC64 && PPC_BOOK3S
3790 bool "IBM pSeries & new (POWER5-based) iSeries"
3791 select MPIC
3792+ select PCI_MSI
3793+ select XICS
3794 select PPC_I8259
3795 select PPC_RTAS
3796 select RTAS_ERROR_LOGGING
3797diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3798index 43c0aca..42c045b 100644
3799--- a/arch/s390/Kconfig
3800+++ b/arch/s390/Kconfig
3801@@ -194,28 +194,26 @@ config AUDIT_ARCH
3802
3803 config S390_SWITCH_AMODE
3804 bool "Switch kernel/user addressing modes"
3805+ default y
3806 help
3807 This option allows to switch the addressing modes of kernel and user
3808- space. The kernel parameter switch_amode=on will enable this feature,
3809- default is disabled. Enabling this (via kernel parameter) on machines
3810- earlier than IBM System z9-109 EC/BC will reduce system performance.
3811+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3812+ will reduce system performance.
3813
3814 Note that this option will also be selected by selecting the execute
3815- protection option below. Enabling the execute protection via the
3816- noexec kernel parameter will also switch the addressing modes,
3817- independent of the switch_amode kernel parameter.
3818+ protection option below. Enabling the execute protection will also
3819+ switch the addressing modes, independent of this option.
3820
3821
3822 config S390_EXEC_PROTECT
3823 bool "Data execute protection"
3824+ default y
3825 select S390_SWITCH_AMODE
3826 help
3827 This option allows to enable a buffer overflow protection for user
3828 space programs and it also selects the addressing mode option above.
3829- The kernel parameter noexec=on will enable this feature and also
3830- switch the addressing modes, default is disabled. Enabling this (via
3831- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3832- will reduce system performance.
3833+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3834+ reduce system performance.
3835
3836 comment "Code generation options"
3837
3838diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3839index e885442..5e6c303 100644
3840--- a/arch/s390/include/asm/elf.h
3841+++ b/arch/s390/include/asm/elf.h
3842@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3843 that it will "exec", and that there is sufficient room for the brk. */
3844 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3845
3846+#ifdef CONFIG_PAX_ASLR
3847+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3848+
3849+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3850+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3851+#endif
3852+
3853 /* This yields a mask that user programs can use to figure out what
3854 instruction set this CPU supports. */
3855
3856diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3857index e37478e..9ce0e9f 100644
3858--- a/arch/s390/include/asm/setup.h
3859+++ b/arch/s390/include/asm/setup.h
3860@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3861 void detect_memory_layout(struct mem_chunk chunk[]);
3862
3863 #ifdef CONFIG_S390_SWITCH_AMODE
3864-extern unsigned int switch_amode;
3865+#define switch_amode (1)
3866 #else
3867 #define switch_amode (0)
3868 #endif
3869
3870 #ifdef CONFIG_S390_EXEC_PROTECT
3871-extern unsigned int s390_noexec;
3872+#define s390_noexec (1)
3873 #else
3874 #define s390_noexec (0)
3875 #endif
3876diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3877index 8377e91..e28e6f1 100644
3878--- a/arch/s390/include/asm/uaccess.h
3879+++ b/arch/s390/include/asm/uaccess.h
3880@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3881 copy_to_user(void __user *to, const void *from, unsigned long n)
3882 {
3883 might_fault();
3884+
3885+ if ((long)n < 0)
3886+ return n;
3887+
3888 if (access_ok(VERIFY_WRITE, to, n))
3889 n = __copy_to_user(to, from, n);
3890 return n;
3891@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3892 static inline unsigned long __must_check
3893 __copy_from_user(void *to, const void __user *from, unsigned long n)
3894 {
3895+ if ((long)n < 0)
3896+ return n;
3897+
3898 if (__builtin_constant_p(n) && (n <= 256))
3899 return uaccess.copy_from_user_small(n, from, to);
3900 else
3901@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3902 copy_from_user(void *to, const void __user *from, unsigned long n)
3903 {
3904 might_fault();
3905+
3906+ if ((long)n < 0)
3907+ return n;
3908+
3909 if (access_ok(VERIFY_READ, from, n))
3910 n = __copy_from_user(to, from, n);
3911 else
3912diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3913index 639380a..72e3c02 100644
3914--- a/arch/s390/kernel/module.c
3915+++ b/arch/s390/kernel/module.c
3916@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3917
3918 /* Increase core size by size of got & plt and set start
3919 offsets for got and plt. */
3920- me->core_size = ALIGN(me->core_size, 4);
3921- me->arch.got_offset = me->core_size;
3922- me->core_size += me->arch.got_size;
3923- me->arch.plt_offset = me->core_size;
3924- me->core_size += me->arch.plt_size;
3925+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3926+ me->arch.got_offset = me->core_size_rw;
3927+ me->core_size_rw += me->arch.got_size;
3928+ me->arch.plt_offset = me->core_size_rx;
3929+ me->core_size_rx += me->arch.plt_size;
3930 return 0;
3931 }
3932
3933@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3934 if (info->got_initialized == 0) {
3935 Elf_Addr *gotent;
3936
3937- gotent = me->module_core + me->arch.got_offset +
3938+ gotent = me->module_core_rw + me->arch.got_offset +
3939 info->got_offset;
3940 *gotent = val;
3941 info->got_initialized = 1;
3942@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3943 else if (r_type == R_390_GOTENT ||
3944 r_type == R_390_GOTPLTENT)
3945 *(unsigned int *) loc =
3946- (val + (Elf_Addr) me->module_core - loc) >> 1;
3947+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3948 else if (r_type == R_390_GOT64 ||
3949 r_type == R_390_GOTPLT64)
3950 *(unsigned long *) loc = val;
3951@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3952 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3953 if (info->plt_initialized == 0) {
3954 unsigned int *ip;
3955- ip = me->module_core + me->arch.plt_offset +
3956+ ip = me->module_core_rx + me->arch.plt_offset +
3957 info->plt_offset;
3958 #ifndef CONFIG_64BIT
3959 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3960@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3961 val - loc + 0xffffUL < 0x1ffffeUL) ||
3962 (r_type == R_390_PLT32DBL &&
3963 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3964- val = (Elf_Addr) me->module_core +
3965+ val = (Elf_Addr) me->module_core_rx +
3966 me->arch.plt_offset +
3967 info->plt_offset;
3968 val += rela->r_addend - loc;
3969@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3970 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3971 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3972 val = val + rela->r_addend -
3973- ((Elf_Addr) me->module_core + me->arch.got_offset);
3974+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3975 if (r_type == R_390_GOTOFF16)
3976 *(unsigned short *) loc = val;
3977 else if (r_type == R_390_GOTOFF32)
3978@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3979 break;
3980 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3981 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3982- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3983+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3984 rela->r_addend - loc;
3985 if (r_type == R_390_GOTPC)
3986 *(unsigned int *) loc = val;
3987diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3988index 061479f..dbfb08c 100644
3989--- a/arch/s390/kernel/setup.c
3990+++ b/arch/s390/kernel/setup.c
3991@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3992 early_param("mem", early_parse_mem);
3993
3994 #ifdef CONFIG_S390_SWITCH_AMODE
3995-unsigned int switch_amode = 0;
3996-EXPORT_SYMBOL_GPL(switch_amode);
3997-
3998 static int set_amode_and_uaccess(unsigned long user_amode,
3999 unsigned long user32_amode)
4000 {
4001@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4002 return 0;
4003 }
4004 }
4005-
4006-/*
4007- * Switch kernel/user addressing modes?
4008- */
4009-static int __init early_parse_switch_amode(char *p)
4010-{
4011- switch_amode = 1;
4012- return 0;
4013-}
4014-early_param("switch_amode", early_parse_switch_amode);
4015-
4016 #else /* CONFIG_S390_SWITCH_AMODE */
4017 static inline int set_amode_and_uaccess(unsigned long user_amode,
4018 unsigned long user32_amode)
4019@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4020 }
4021 #endif /* CONFIG_S390_SWITCH_AMODE */
4022
4023-#ifdef CONFIG_S390_EXEC_PROTECT
4024-unsigned int s390_noexec = 0;
4025-EXPORT_SYMBOL_GPL(s390_noexec);
4026-
4027-/*
4028- * Enable execute protection?
4029- */
4030-static int __init early_parse_noexec(char *p)
4031-{
4032- if (!strncmp(p, "off", 3))
4033- return 0;
4034- switch_amode = 1;
4035- s390_noexec = 1;
4036- return 0;
4037-}
4038-early_param("noexec", early_parse_noexec);
4039-#endif /* CONFIG_S390_EXEC_PROTECT */
4040-
4041 static void setup_addressing_mode(void)
4042 {
4043 if (s390_noexec) {
4044diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4045index f4558cc..e461f37 100644
4046--- a/arch/s390/mm/mmap.c
4047+++ b/arch/s390/mm/mmap.c
4048@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4049 */
4050 if (mmap_is_legacy()) {
4051 mm->mmap_base = TASK_UNMAPPED_BASE;
4052+
4053+#ifdef CONFIG_PAX_RANDMMAP
4054+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4055+ mm->mmap_base += mm->delta_mmap;
4056+#endif
4057+
4058 mm->get_unmapped_area = arch_get_unmapped_area;
4059 mm->unmap_area = arch_unmap_area;
4060 } else {
4061 mm->mmap_base = mmap_base();
4062+
4063+#ifdef CONFIG_PAX_RANDMMAP
4064+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4065+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4066+#endif
4067+
4068 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4069 mm->unmap_area = arch_unmap_area_topdown;
4070 }
4071@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4072 */
4073 if (mmap_is_legacy()) {
4074 mm->mmap_base = TASK_UNMAPPED_BASE;
4075+
4076+#ifdef CONFIG_PAX_RANDMMAP
4077+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4078+ mm->mmap_base += mm->delta_mmap;
4079+#endif
4080+
4081 mm->get_unmapped_area = s390_get_unmapped_area;
4082 mm->unmap_area = arch_unmap_area;
4083 } else {
4084 mm->mmap_base = mmap_base();
4085+
4086+#ifdef CONFIG_PAX_RANDMMAP
4087+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4088+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4089+#endif
4090+
4091 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4092 mm->unmap_area = arch_unmap_area_topdown;
4093 }
4094diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4095index 589d5c7..669e274 100644
4096--- a/arch/score/include/asm/system.h
4097+++ b/arch/score/include/asm/system.h
4098@@ -17,7 +17,7 @@ do { \
4099 #define finish_arch_switch(prev) do {} while (0)
4100
4101 typedef void (*vi_handler_t)(void);
4102-extern unsigned long arch_align_stack(unsigned long sp);
4103+#define arch_align_stack(x) (x)
4104
4105 #define mb() barrier()
4106 #define rmb() barrier()
4107diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4108index 25d0803..d6c8e36 100644
4109--- a/arch/score/kernel/process.c
4110+++ b/arch/score/kernel/process.c
4111@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4112
4113 return task_pt_regs(task)->cp0_epc;
4114 }
4115-
4116-unsigned long arch_align_stack(unsigned long sp)
4117-{
4118- return sp;
4119-}
4120diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4121index d936c1a..304a252 100644
4122--- a/arch/sh/boards/mach-hp6xx/pm.c
4123+++ b/arch/sh/boards/mach-hp6xx/pm.c
4124@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4125 return 0;
4126 }
4127
4128-static struct platform_suspend_ops hp6x0_pm_ops = {
4129+static const struct platform_suspend_ops hp6x0_pm_ops = {
4130 .enter = hp6x0_pm_enter,
4131 .valid = suspend_valid_only_mem,
4132 };
4133diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4134index 8a8a993..7b3079b 100644
4135--- a/arch/sh/kernel/cpu/sh4/sq.c
4136+++ b/arch/sh/kernel/cpu/sh4/sq.c
4137@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4138 NULL,
4139 };
4140
4141-static struct sysfs_ops sq_sysfs_ops = {
4142+static const struct sysfs_ops sq_sysfs_ops = {
4143 .show = sq_sysfs_show,
4144 .store = sq_sysfs_store,
4145 };
4146diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4147index ee3c2aa..c49cee6 100644
4148--- a/arch/sh/kernel/cpu/shmobile/pm.c
4149+++ b/arch/sh/kernel/cpu/shmobile/pm.c
4150@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4151 return 0;
4152 }
4153
4154-static struct platform_suspend_ops sh_pm_ops = {
4155+static const struct platform_suspend_ops sh_pm_ops = {
4156 .enter = sh_pm_enter,
4157 .valid = suspend_valid_only_mem,
4158 };
4159diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4160index 3e532d0..9faa306 100644
4161--- a/arch/sh/kernel/kgdb.c
4162+++ b/arch/sh/kernel/kgdb.c
4163@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4164 {
4165 }
4166
4167-struct kgdb_arch arch_kgdb_ops = {
4168+const struct kgdb_arch arch_kgdb_ops = {
4169 /* Breakpoint instruction: trapa #0x3c */
4170 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4171 .gdb_bpt_instr = { 0x3c, 0xc3 },
4172diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4173index afeb710..d1d1289 100644
4174--- a/arch/sh/mm/mmap.c
4175+++ b/arch/sh/mm/mmap.c
4176@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4177 addr = PAGE_ALIGN(addr);
4178
4179 vma = find_vma(mm, addr);
4180- if (TASK_SIZE - len >= addr &&
4181- (!vma || addr + len <= vma->vm_start))
4182+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4183 return addr;
4184 }
4185
4186@@ -106,7 +105,7 @@ full_search:
4187 }
4188 return -ENOMEM;
4189 }
4190- if (likely(!vma || addr + len <= vma->vm_start)) {
4191+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4192 /*
4193 * Remember the place where we stopped the search:
4194 */
4195@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4196 addr = PAGE_ALIGN(addr);
4197
4198 vma = find_vma(mm, addr);
4199- if (TASK_SIZE - len >= addr &&
4200- (!vma || addr + len <= vma->vm_start))
4201+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4202 return addr;
4203 }
4204
4205@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4206 /* make sure it can fit in the remaining address space */
4207 if (likely(addr > len)) {
4208 vma = find_vma(mm, addr-len);
4209- if (!vma || addr <= vma->vm_start) {
4210+ if (check_heap_stack_gap(vma, addr - len, len)) {
4211 /* remember the address as a hint for next time */
4212 return (mm->free_area_cache = addr-len);
4213 }
4214@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4215 if (unlikely(mm->mmap_base < len))
4216 goto bottomup;
4217
4218- addr = mm->mmap_base-len;
4219- if (do_colour_align)
4220- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4221+ addr = mm->mmap_base - len;
4222
4223 do {
4224+ if (do_colour_align)
4225+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4226 /*
4227 * Lookup failure means no vma is above this address,
4228 * else if new region fits below vma->vm_start,
4229 * return with success:
4230 */
4231 vma = find_vma(mm, addr);
4232- if (likely(!vma || addr+len <= vma->vm_start)) {
4233+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4234 /* remember the address as a hint for next time */
4235 return (mm->free_area_cache = addr);
4236 }
4237@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4238 mm->cached_hole_size = vma->vm_start - addr;
4239
4240 /* try just below the current vma->vm_start */
4241- addr = vma->vm_start-len;
4242- if (do_colour_align)
4243- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4244- } while (likely(len < vma->vm_start));
4245+ addr = skip_heap_stack_gap(vma, len);
4246+ } while (!IS_ERR_VALUE(addr));
4247
4248 bottomup:
4249 /*
4250diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4251index 113225b..7fd04e7 100644
4252--- a/arch/sparc/Makefile
4253+++ b/arch/sparc/Makefile
4254@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4255 # Export what is needed by arch/sparc/boot/Makefile
4256 export VMLINUX_INIT VMLINUX_MAIN
4257 VMLINUX_INIT := $(head-y) $(init-y)
4258-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4259+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4260 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4261 VMLINUX_MAIN += $(drivers-y) $(net-y)
4262
4263diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4264index f5cc06f..f858d47 100644
4265--- a/arch/sparc/include/asm/atomic_64.h
4266+++ b/arch/sparc/include/asm/atomic_64.h
4267@@ -14,18 +14,40 @@
4268 #define ATOMIC64_INIT(i) { (i) }
4269
4270 #define atomic_read(v) ((v)->counter)
4271+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4272+{
4273+ return v->counter;
4274+}
4275 #define atomic64_read(v) ((v)->counter)
4276+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4277+{
4278+ return v->counter;
4279+}
4280
4281 #define atomic_set(v, i) (((v)->counter) = i)
4282+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4283+{
4284+ v->counter = i;
4285+}
4286 #define atomic64_set(v, i) (((v)->counter) = i)
4287+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4288+{
4289+ v->counter = i;
4290+}
4291
4292 extern void atomic_add(int, atomic_t *);
4293+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4294 extern void atomic64_add(long, atomic64_t *);
4295+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4296 extern void atomic_sub(int, atomic_t *);
4297+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_sub(long, atomic64_t *);
4299+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4300
4301 extern int atomic_add_ret(int, atomic_t *);
4302+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4303 extern long atomic64_add_ret(long, atomic64_t *);
4304+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4305 extern int atomic_sub_ret(int, atomic_t *);
4306 extern long atomic64_sub_ret(long, atomic64_t *);
4307
4308@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4309 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4310
4311 #define atomic_inc_return(v) atomic_add_ret(1, v)
4312+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4313+{
4314+ return atomic_add_ret_unchecked(1, v);
4315+}
4316 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4317+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4318+{
4319+ return atomic64_add_ret_unchecked(1, v);
4320+}
4321
4322 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4323 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4324
4325 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4326+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4327+{
4328+ return atomic_add_ret_unchecked(i, v);
4329+}
4330 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4331+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4332+{
4333+ return atomic64_add_ret_unchecked(i, v);
4334+}
4335
4336 /*
4337 * atomic_inc_and_test - increment and test
4338@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4339 * other cases.
4340 */
4341 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4342+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4343+{
4344+ return atomic_inc_return_unchecked(v) == 0;
4345+}
4346 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4347
4348 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4349@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4350 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4351
4352 #define atomic_inc(v) atomic_add(1, v)
4353+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4354+{
4355+ atomic_add_unchecked(1, v);
4356+}
4357 #define atomic64_inc(v) atomic64_add(1, v)
4358+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4359+{
4360+ atomic64_add_unchecked(1, v);
4361+}
4362
4363 #define atomic_dec(v) atomic_sub(1, v)
4364+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4365+{
4366+ atomic_sub_unchecked(1, v);
4367+}
4368 #define atomic64_dec(v) atomic64_sub(1, v)
4369+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4370+{
4371+ atomic64_sub_unchecked(1, v);
4372+}
4373
4374 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4375 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4376
4377 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4378+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4379+{
4380+ return cmpxchg(&v->counter, old, new);
4381+}
4382 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4383+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4384+{
4385+ return xchg(&v->counter, new);
4386+}
4387
4388 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4389 {
4390- int c, old;
4391+ int c, old, new;
4392 c = atomic_read(v);
4393 for (;;) {
4394- if (unlikely(c == (u)))
4395+ if (unlikely(c == u))
4396 break;
4397- old = atomic_cmpxchg((v), c, c + (a));
4398+
4399+ asm volatile("addcc %2, %0, %0\n"
4400+
4401+#ifdef CONFIG_PAX_REFCOUNT
4402+ "tvs %%icc, 6\n"
4403+#endif
4404+
4405+ : "=r" (new)
4406+ : "0" (c), "ir" (a)
4407+ : "cc");
4408+
4409+ old = atomic_cmpxchg(v, c, new);
4410 if (likely(old == c))
4411 break;
4412 c = old;
4413 }
4414- return c != (u);
4415+ return c != u;
4416 }
4417
4418 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4419@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4420 #define atomic64_cmpxchg(v, o, n) \
4421 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4422 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4423+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4424+{
4425+ return xchg(&v->counter, new);
4426+}
4427
4428 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4429 {
4430- long c, old;
4431+ long c, old, new;
4432 c = atomic64_read(v);
4433 for (;;) {
4434- if (unlikely(c == (u)))
4435+ if (unlikely(c == u))
4436 break;
4437- old = atomic64_cmpxchg((v), c, c + (a));
4438+
4439+ asm volatile("addcc %2, %0, %0\n"
4440+
4441+#ifdef CONFIG_PAX_REFCOUNT
4442+ "tvs %%xcc, 6\n"
4443+#endif
4444+
4445+ : "=r" (new)
4446+ : "0" (c), "ir" (a)
4447+ : "cc");
4448+
4449+ old = atomic64_cmpxchg(v, c, new);
4450 if (likely(old == c))
4451 break;
4452 c = old;
4453 }
4454- return c != (u);
4455+ return c != u;
4456 }
4457
4458 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4459diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4460index 41f85ae..fb54d5e 100644
4461--- a/arch/sparc/include/asm/cache.h
4462+++ b/arch/sparc/include/asm/cache.h
4463@@ -8,7 +8,7 @@
4464 #define _SPARC_CACHE_H
4465
4466 #define L1_CACHE_SHIFT 5
4467-#define L1_CACHE_BYTES 32
4468+#define L1_CACHE_BYTES 32UL
4469 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4470
4471 #ifdef CONFIG_SPARC32
4472diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4473index 5a8c308..38def92 100644
4474--- a/arch/sparc/include/asm/dma-mapping.h
4475+++ b/arch/sparc/include/asm/dma-mapping.h
4476@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4477 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4478 #define dma_is_consistent(d, h) (1)
4479
4480-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4481+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4482 extern struct bus_type pci_bus_type;
4483
4484-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4485+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4486 {
4487 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4488 if (dev->bus == &pci_bus_type)
4489@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4490 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4491 dma_addr_t *dma_handle, gfp_t flag)
4492 {
4493- struct dma_map_ops *ops = get_dma_ops(dev);
4494+ const struct dma_map_ops *ops = get_dma_ops(dev);
4495 void *cpu_addr;
4496
4497 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4498@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4499 static inline void dma_free_coherent(struct device *dev, size_t size,
4500 void *cpu_addr, dma_addr_t dma_handle)
4501 {
4502- struct dma_map_ops *ops = get_dma_ops(dev);
4503+ const struct dma_map_ops *ops = get_dma_ops(dev);
4504
4505 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4506 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4507diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4508index 381a1b5..b97e3ff 100644
4509--- a/arch/sparc/include/asm/elf_32.h
4510+++ b/arch/sparc/include/asm/elf_32.h
4511@@ -116,6 +116,13 @@ typedef struct {
4512
4513 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4514
4515+#ifdef CONFIG_PAX_ASLR
4516+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4517+
4518+#define PAX_DELTA_MMAP_LEN 16
4519+#define PAX_DELTA_STACK_LEN 16
4520+#endif
4521+
4522 /* This yields a mask that user programs can use to figure out what
4523 instruction set this cpu supports. This can NOT be done in userspace
4524 on Sparc. */
4525diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4526index 9968085..c2106ef 100644
4527--- a/arch/sparc/include/asm/elf_64.h
4528+++ b/arch/sparc/include/asm/elf_64.h
4529@@ -163,6 +163,12 @@ typedef struct {
4530 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4531 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4532
4533+#ifdef CONFIG_PAX_ASLR
4534+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4535+
4536+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4537+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4538+#endif
4539
4540 /* This yields a mask that user programs can use to figure out what
4541 instruction set this cpu supports. */
4542diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4543index e0cabe7..efd60f1 100644
4544--- a/arch/sparc/include/asm/pgtable_32.h
4545+++ b/arch/sparc/include/asm/pgtable_32.h
4546@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4547 BTFIXUPDEF_INT(page_none)
4548 BTFIXUPDEF_INT(page_copy)
4549 BTFIXUPDEF_INT(page_readonly)
4550+
4551+#ifdef CONFIG_PAX_PAGEEXEC
4552+BTFIXUPDEF_INT(page_shared_noexec)
4553+BTFIXUPDEF_INT(page_copy_noexec)
4554+BTFIXUPDEF_INT(page_readonly_noexec)
4555+#endif
4556+
4557 BTFIXUPDEF_INT(page_kernel)
4558
4559 #define PMD_SHIFT SUN4C_PMD_SHIFT
4560@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4561 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4562 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4563
4564+#ifdef CONFIG_PAX_PAGEEXEC
4565+extern pgprot_t PAGE_SHARED_NOEXEC;
4566+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4567+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4568+#else
4569+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4570+# define PAGE_COPY_NOEXEC PAGE_COPY
4571+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4572+#endif
4573+
4574 extern unsigned long page_kernel;
4575
4576 #ifdef MODULE
4577diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4578index 1407c07..7e10231 100644
4579--- a/arch/sparc/include/asm/pgtsrmmu.h
4580+++ b/arch/sparc/include/asm/pgtsrmmu.h
4581@@ -115,6 +115,13 @@
4582 SRMMU_EXEC | SRMMU_REF)
4583 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4584 SRMMU_EXEC | SRMMU_REF)
4585+
4586+#ifdef CONFIG_PAX_PAGEEXEC
4587+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4588+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4589+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4590+#endif
4591+
4592 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4593 SRMMU_DIRTY | SRMMU_REF)
4594
4595diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4596index 43e5147..47622a1 100644
4597--- a/arch/sparc/include/asm/spinlock_64.h
4598+++ b/arch/sparc/include/asm/spinlock_64.h
4599@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4600
4601 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4602
4603-static void inline arch_read_lock(raw_rwlock_t *lock)
4604+static inline void arch_read_lock(raw_rwlock_t *lock)
4605 {
4606 unsigned long tmp1, tmp2;
4607
4608 __asm__ __volatile__ (
4609 "1: ldsw [%2], %0\n"
4610 " brlz,pn %0, 2f\n"
4611-"4: add %0, 1, %1\n"
4612+"4: addcc %0, 1, %1\n"
4613+
4614+#ifdef CONFIG_PAX_REFCOUNT
4615+" tvs %%icc, 6\n"
4616+#endif
4617+
4618 " cas [%2], %0, %1\n"
4619 " cmp %0, %1\n"
4620 " bne,pn %%icc, 1b\n"
4621@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4622 " .previous"
4623 : "=&r" (tmp1), "=&r" (tmp2)
4624 : "r" (lock)
4625- : "memory");
4626+ : "memory", "cc");
4627 }
4628
4629-static int inline arch_read_trylock(raw_rwlock_t *lock)
4630+static inline int arch_read_trylock(raw_rwlock_t *lock)
4631 {
4632 int tmp1, tmp2;
4633
4634@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4635 "1: ldsw [%2], %0\n"
4636 " brlz,a,pn %0, 2f\n"
4637 " mov 0, %0\n"
4638-" add %0, 1, %1\n"
4639+" addcc %0, 1, %1\n"
4640+
4641+#ifdef CONFIG_PAX_REFCOUNT
4642+" tvs %%icc, 6\n"
4643+#endif
4644+
4645 " cas [%2], %0, %1\n"
4646 " cmp %0, %1\n"
4647 " bne,pn %%icc, 1b\n"
4648@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4649 return tmp1;
4650 }
4651
4652-static void inline arch_read_unlock(raw_rwlock_t *lock)
4653+static inline void arch_read_unlock(raw_rwlock_t *lock)
4654 {
4655 unsigned long tmp1, tmp2;
4656
4657 __asm__ __volatile__(
4658 "1: lduw [%2], %0\n"
4659-" sub %0, 1, %1\n"
4660+" subcc %0, 1, %1\n"
4661+
4662+#ifdef CONFIG_PAX_REFCOUNT
4663+" tvs %%icc, 6\n"
4664+#endif
4665+
4666 " cas [%2], %0, %1\n"
4667 " cmp %0, %1\n"
4668 " bne,pn %%xcc, 1b\n"
4669@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4670 : "memory");
4671 }
4672
4673-static void inline arch_write_lock(raw_rwlock_t *lock)
4674+static inline void arch_write_lock(raw_rwlock_t *lock)
4675 {
4676 unsigned long mask, tmp1, tmp2;
4677
4678@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4679 : "memory");
4680 }
4681
4682-static void inline arch_write_unlock(raw_rwlock_t *lock)
4683+static inline void arch_write_unlock(raw_rwlock_t *lock)
4684 {
4685 __asm__ __volatile__(
4686 " stw %%g0, [%0]"
4687@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4688 : "memory");
4689 }
4690
4691-static int inline arch_write_trylock(raw_rwlock_t *lock)
4692+static inline int arch_write_trylock(raw_rwlock_t *lock)
4693 {
4694 unsigned long mask, tmp1, tmp2, result;
4695
4696diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4697index 844d73a..f787fb9 100644
4698--- a/arch/sparc/include/asm/thread_info_32.h
4699+++ b/arch/sparc/include/asm/thread_info_32.h
4700@@ -50,6 +50,8 @@ struct thread_info {
4701 unsigned long w_saved;
4702
4703 struct restart_block restart_block;
4704+
4705+ unsigned long lowest_stack;
4706 };
4707
4708 /*
4709diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4710index f78ad9a..9f55fc7 100644
4711--- a/arch/sparc/include/asm/thread_info_64.h
4712+++ b/arch/sparc/include/asm/thread_info_64.h
4713@@ -68,6 +68,8 @@ struct thread_info {
4714 struct pt_regs *kern_una_regs;
4715 unsigned int kern_una_insn;
4716
4717+ unsigned long lowest_stack;
4718+
4719 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4720 };
4721
4722diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4723index e88fbe5..96b0ce5 100644
4724--- a/arch/sparc/include/asm/uaccess.h
4725+++ b/arch/sparc/include/asm/uaccess.h
4726@@ -1,5 +1,13 @@
4727 #ifndef ___ASM_SPARC_UACCESS_H
4728 #define ___ASM_SPARC_UACCESS_H
4729+
4730+#ifdef __KERNEL__
4731+#ifndef __ASSEMBLY__
4732+#include <linux/types.h>
4733+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4734+#endif
4735+#endif
4736+
4737 #if defined(__sparc__) && defined(__arch64__)
4738 #include <asm/uaccess_64.h>
4739 #else
4740diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4741index 8303ac4..07f333d 100644
4742--- a/arch/sparc/include/asm/uaccess_32.h
4743+++ b/arch/sparc/include/asm/uaccess_32.h
4744@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4745
4746 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4747 {
4748- if (n && __access_ok((unsigned long) to, n))
4749+ if ((long)n < 0)
4750+ return n;
4751+
4752+ if (n && __access_ok((unsigned long) to, n)) {
4753+ if (!__builtin_constant_p(n))
4754+ check_object_size(from, n, true);
4755 return __copy_user(to, (__force void __user *) from, n);
4756- else
4757+ } else
4758 return n;
4759 }
4760
4761 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4762 {
4763+ if ((long)n < 0)
4764+ return n;
4765+
4766+ if (!__builtin_constant_p(n))
4767+ check_object_size(from, n, true);
4768+
4769 return __copy_user(to, (__force void __user *) from, n);
4770 }
4771
4772 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4773 {
4774- if (n && __access_ok((unsigned long) from, n))
4775+ if ((long)n < 0)
4776+ return n;
4777+
4778+ if (n && __access_ok((unsigned long) from, n)) {
4779+ if (!__builtin_constant_p(n))
4780+ check_object_size(to, n, false);
4781 return __copy_user((__force void __user *) to, from, n);
4782- else
4783+ } else
4784 return n;
4785 }
4786
4787 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4788 {
4789+ if ((long)n < 0)
4790+ return n;
4791+
4792 return __copy_user((__force void __user *) to, from, n);
4793 }
4794
4795diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4796index 9ea271e..7b8a271 100644
4797--- a/arch/sparc/include/asm/uaccess_64.h
4798+++ b/arch/sparc/include/asm/uaccess_64.h
4799@@ -9,6 +9,7 @@
4800 #include <linux/compiler.h>
4801 #include <linux/string.h>
4802 #include <linux/thread_info.h>
4803+#include <linux/kernel.h>
4804 #include <asm/asi.h>
4805 #include <asm/system.h>
4806 #include <asm/spitfire.h>
4807@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4808 static inline unsigned long __must_check
4809 copy_from_user(void *to, const void __user *from, unsigned long size)
4810 {
4811- unsigned long ret = ___copy_from_user(to, from, size);
4812+ unsigned long ret;
4813
4814+ if ((long)size < 0 || size > INT_MAX)
4815+ return size;
4816+
4817+ if (!__builtin_constant_p(size))
4818+ check_object_size(to, size, false);
4819+
4820+ ret = ___copy_from_user(to, from, size);
4821 if (unlikely(ret))
4822 ret = copy_from_user_fixup(to, from, size);
4823 return ret;
4824@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4825 static inline unsigned long __must_check
4826 copy_to_user(void __user *to, const void *from, unsigned long size)
4827 {
4828- unsigned long ret = ___copy_to_user(to, from, size);
4829+ unsigned long ret;
4830+
4831+ if ((long)size < 0 || size > INT_MAX)
4832+ return size;
4833+
4834+ if (!__builtin_constant_p(size))
4835+ check_object_size(from, size, true);
4836
4837+ ret = ___copy_to_user(to, from, size);
4838 if (unlikely(ret))
4839 ret = copy_to_user_fixup(to, from, size);
4840 return ret;
4841diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4842index 2782681..77ded84 100644
4843--- a/arch/sparc/kernel/Makefile
4844+++ b/arch/sparc/kernel/Makefile
4845@@ -3,7 +3,7 @@
4846 #
4847
4848 asflags-y := -ansi
4849-ccflags-y := -Werror
4850+#ccflags-y := -Werror
4851
4852 extra-y := head_$(BITS).o
4853 extra-y += init_task.o
4854diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4855index 7690cc2..ece64c9 100644
4856--- a/arch/sparc/kernel/iommu.c
4857+++ b/arch/sparc/kernel/iommu.c
4858@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4859 spin_unlock_irqrestore(&iommu->lock, flags);
4860 }
4861
4862-static struct dma_map_ops sun4u_dma_ops = {
4863+static const struct dma_map_ops sun4u_dma_ops = {
4864 .alloc_coherent = dma_4u_alloc_coherent,
4865 .free_coherent = dma_4u_free_coherent,
4866 .map_page = dma_4u_map_page,
4867@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4868 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4869 };
4870
4871-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4872+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4873 EXPORT_SYMBOL(dma_ops);
4874
4875 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4876diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4877index 9f61fd8..bd048db 100644
4878--- a/arch/sparc/kernel/ioport.c
4879+++ b/arch/sparc/kernel/ioport.c
4880@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4881 BUG();
4882 }
4883
4884-struct dma_map_ops sbus_dma_ops = {
4885+const struct dma_map_ops sbus_dma_ops = {
4886 .alloc_coherent = sbus_alloc_coherent,
4887 .free_coherent = sbus_free_coherent,
4888 .map_page = sbus_map_page,
4889@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4890 .sync_sg_for_device = sbus_sync_sg_for_device,
4891 };
4892
4893-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4894+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4895 EXPORT_SYMBOL(dma_ops);
4896
4897 static int __init sparc_register_ioport(void)
4898@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4899 }
4900 }
4901
4902-struct dma_map_ops pci32_dma_ops = {
4903+const struct dma_map_ops pci32_dma_ops = {
4904 .alloc_coherent = pci32_alloc_coherent,
4905 .free_coherent = pci32_free_coherent,
4906 .map_page = pci32_map_page,
4907diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4908index 04df4ed..55c4b6e 100644
4909--- a/arch/sparc/kernel/kgdb_32.c
4910+++ b/arch/sparc/kernel/kgdb_32.c
4911@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4912 {
4913 }
4914
4915-struct kgdb_arch arch_kgdb_ops = {
4916+const struct kgdb_arch arch_kgdb_ops = {
4917 /* Breakpoint instruction: ta 0x7d */
4918 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4919 };
4920diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4921index f5a0fd4..d886f71 100644
4922--- a/arch/sparc/kernel/kgdb_64.c
4923+++ b/arch/sparc/kernel/kgdb_64.c
4924@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4925 {
4926 }
4927
4928-struct kgdb_arch arch_kgdb_ops = {
4929+const struct kgdb_arch arch_kgdb_ops = {
4930 /* Breakpoint instruction: ta 0x72 */
4931 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4932 };
4933diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4934index 23c33ff..d137fbd 100644
4935--- a/arch/sparc/kernel/pci_sun4v.c
4936+++ b/arch/sparc/kernel/pci_sun4v.c
4937@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4938 spin_unlock_irqrestore(&iommu->lock, flags);
4939 }
4940
4941-static struct dma_map_ops sun4v_dma_ops = {
4942+static const struct dma_map_ops sun4v_dma_ops = {
4943 .alloc_coherent = dma_4v_alloc_coherent,
4944 .free_coherent = dma_4v_free_coherent,
4945 .map_page = dma_4v_map_page,
4946diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4947index c49865b..b41a81b 100644
4948--- a/arch/sparc/kernel/process_32.c
4949+++ b/arch/sparc/kernel/process_32.c
4950@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4951 rw->ins[4], rw->ins[5],
4952 rw->ins[6],
4953 rw->ins[7]);
4954- printk("%pS\n", (void *) rw->ins[7]);
4955+ printk("%pA\n", (void *) rw->ins[7]);
4956 rw = (struct reg_window32 *) rw->ins[6];
4957 }
4958 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4959@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4960
4961 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4962 r->psr, r->pc, r->npc, r->y, print_tainted());
4963- printk("PC: <%pS>\n", (void *) r->pc);
4964+ printk("PC: <%pA>\n", (void *) r->pc);
4965 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4966 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4967 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4968 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4969 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4970 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4971- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4972+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4973
4974 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4975 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4976@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4977 rw = (struct reg_window32 *) fp;
4978 pc = rw->ins[7];
4979 printk("[%08lx : ", pc);
4980- printk("%pS ] ", (void *) pc);
4981+ printk("%pA ] ", (void *) pc);
4982 fp = rw->ins[6];
4983 } while (++count < 16);
4984 printk("\n");
4985diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4986index cb70476..3d0c191 100644
4987--- a/arch/sparc/kernel/process_64.c
4988+++ b/arch/sparc/kernel/process_64.c
4989@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4990 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4991 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4992 if (regs->tstate & TSTATE_PRIV)
4993- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4994+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4995 }
4996
4997 void show_regs(struct pt_regs *regs)
4998 {
4999 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5000 regs->tpc, regs->tnpc, regs->y, print_tainted());
5001- printk("TPC: <%pS>\n", (void *) regs->tpc);
5002+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5003 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5004 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5005 regs->u_regs[3]);
5006@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5007 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5008 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5009 regs->u_regs[15]);
5010- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5011+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5012 show_regwindow(regs);
5013 }
5014
5015@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5016 ((tp && tp->task) ? tp->task->pid : -1));
5017
5018 if (gp->tstate & TSTATE_PRIV) {
5019- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5020+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5021 (void *) gp->tpc,
5022 (void *) gp->o7,
5023 (void *) gp->i7,
5024diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5025index 6edc4e5..06a69b4 100644
5026--- a/arch/sparc/kernel/sigutil_64.c
5027+++ b/arch/sparc/kernel/sigutil_64.c
5028@@ -2,6 +2,7 @@
5029 #include <linux/types.h>
5030 #include <linux/thread_info.h>
5031 #include <linux/uaccess.h>
5032+#include <linux/errno.h>
5033
5034 #include <asm/sigcontext.h>
5035 #include <asm/fpumacro.h>
5036diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5037index 3a82e65..ce0a53a 100644
5038--- a/arch/sparc/kernel/sys_sparc_32.c
5039+++ b/arch/sparc/kernel/sys_sparc_32.c
5040@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5041 if (ARCH_SUN4C && len > 0x20000000)
5042 return -ENOMEM;
5043 if (!addr)
5044- addr = TASK_UNMAPPED_BASE;
5045+ addr = current->mm->mmap_base;
5046
5047 if (flags & MAP_SHARED)
5048 addr = COLOUR_ALIGN(addr);
5049@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5050 }
5051 if (TASK_SIZE - PAGE_SIZE - len < addr)
5052 return -ENOMEM;
5053- if (!vmm || addr + len <= vmm->vm_start)
5054+ if (check_heap_stack_gap(vmm, addr, len))
5055 return addr;
5056 addr = vmm->vm_end;
5057 if (flags & MAP_SHARED)
5058diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5059index cfa0e19..98972ac 100644
5060--- a/arch/sparc/kernel/sys_sparc_64.c
5061+++ b/arch/sparc/kernel/sys_sparc_64.c
5062@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5063 /* We do not accept a shared mapping if it would violate
5064 * cache aliasing constraints.
5065 */
5066- if ((flags & MAP_SHARED) &&
5067+ if ((filp || (flags & MAP_SHARED)) &&
5068 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5069 return -EINVAL;
5070 return addr;
5071@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5072 if (filp || (flags & MAP_SHARED))
5073 do_color_align = 1;
5074
5075+#ifdef CONFIG_PAX_RANDMMAP
5076+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5077+#endif
5078+
5079 if (addr) {
5080 if (do_color_align)
5081 addr = COLOUR_ALIGN(addr, pgoff);
5082@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5083 addr = PAGE_ALIGN(addr);
5084
5085 vma = find_vma(mm, addr);
5086- if (task_size - len >= addr &&
5087- (!vma || addr + len <= vma->vm_start))
5088+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5089 return addr;
5090 }
5091
5092 if (len > mm->cached_hole_size) {
5093- start_addr = addr = mm->free_area_cache;
5094+ start_addr = addr = mm->free_area_cache;
5095 } else {
5096- start_addr = addr = TASK_UNMAPPED_BASE;
5097+ start_addr = addr = mm->mmap_base;
5098 mm->cached_hole_size = 0;
5099 }
5100
5101@@ -175,14 +178,14 @@ full_search:
5102 vma = find_vma(mm, VA_EXCLUDE_END);
5103 }
5104 if (unlikely(task_size < addr)) {
5105- if (start_addr != TASK_UNMAPPED_BASE) {
5106- start_addr = addr = TASK_UNMAPPED_BASE;
5107+ if (start_addr != mm->mmap_base) {
5108+ start_addr = addr = mm->mmap_base;
5109 mm->cached_hole_size = 0;
5110 goto full_search;
5111 }
5112 return -ENOMEM;
5113 }
5114- if (likely(!vma || addr + len <= vma->vm_start)) {
5115+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5116 /*
5117 * Remember the place where we stopped the search:
5118 */
5119@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5120 /* We do not accept a shared mapping if it would violate
5121 * cache aliasing constraints.
5122 */
5123- if ((flags & MAP_SHARED) &&
5124+ if ((filp || (flags & MAP_SHARED)) &&
5125 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5126 return -EINVAL;
5127 return addr;
5128@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5129 addr = PAGE_ALIGN(addr);
5130
5131 vma = find_vma(mm, addr);
5132- if (task_size - len >= addr &&
5133- (!vma || addr + len <= vma->vm_start))
5134+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5135 return addr;
5136 }
5137
5138@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5139 /* make sure it can fit in the remaining address space */
5140 if (likely(addr > len)) {
5141 vma = find_vma(mm, addr-len);
5142- if (!vma || addr <= vma->vm_start) {
5143+ if (check_heap_stack_gap(vma, addr - len, len)) {
5144 /* remember the address as a hint for next time */
5145 return (mm->free_area_cache = addr-len);
5146 }
5147@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5148 if (unlikely(mm->mmap_base < len))
5149 goto bottomup;
5150
5151- addr = mm->mmap_base-len;
5152- if (do_color_align)
5153- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5154+ addr = mm->mmap_base - len;
5155
5156 do {
5157+ if (do_color_align)
5158+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5159 /*
5160 * Lookup failure means no vma is above this address,
5161 * else if new region fits below vma->vm_start,
5162 * return with success:
5163 */
5164 vma = find_vma(mm, addr);
5165- if (likely(!vma || addr+len <= vma->vm_start)) {
5166+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5167 /* remember the address as a hint for next time */
5168 return (mm->free_area_cache = addr);
5169 }
5170@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5171 mm->cached_hole_size = vma->vm_start - addr;
5172
5173 /* try just below the current vma->vm_start */
5174- addr = vma->vm_start-len;
5175- if (do_color_align)
5176- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5177- } while (likely(len < vma->vm_start));
5178+ addr = skip_heap_stack_gap(vma, len);
5179+ } while (!IS_ERR_VALUE(addr));
5180
5181 bottomup:
5182 /*
5183@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5184 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5185 sysctl_legacy_va_layout) {
5186 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5187+
5188+#ifdef CONFIG_PAX_RANDMMAP
5189+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5190+ mm->mmap_base += mm->delta_mmap;
5191+#endif
5192+
5193 mm->get_unmapped_area = arch_get_unmapped_area;
5194 mm->unmap_area = arch_unmap_area;
5195 } else {
5196@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5197 gap = (task_size / 6 * 5);
5198
5199 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5200+
5201+#ifdef CONFIG_PAX_RANDMMAP
5202+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5203+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5204+#endif
5205+
5206 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5207 mm->unmap_area = arch_unmap_area_topdown;
5208 }
5209diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5210index c0490c7..84959d1 100644
5211--- a/arch/sparc/kernel/traps_32.c
5212+++ b/arch/sparc/kernel/traps_32.c
5213@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5214 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5215 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5216
5217+extern void gr_handle_kernel_exploit(void);
5218+
5219 void die_if_kernel(char *str, struct pt_regs *regs)
5220 {
5221 static int die_counter;
5222@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5223 count++ < 30 &&
5224 (((unsigned long) rw) >= PAGE_OFFSET) &&
5225 !(((unsigned long) rw) & 0x7)) {
5226- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5227+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5228 (void *) rw->ins[7]);
5229 rw = (struct reg_window32 *)rw->ins[6];
5230 }
5231 }
5232 printk("Instruction DUMP:");
5233 instruction_dump ((unsigned long *) regs->pc);
5234- if(regs->psr & PSR_PS)
5235+ if(regs->psr & PSR_PS) {
5236+ gr_handle_kernel_exploit();
5237 do_exit(SIGKILL);
5238+ }
5239 do_exit(SIGSEGV);
5240 }
5241
5242diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5243index 10f7bb9..cdb6793 100644
5244--- a/arch/sparc/kernel/traps_64.c
5245+++ b/arch/sparc/kernel/traps_64.c
5246@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5247 i + 1,
5248 p->trapstack[i].tstate, p->trapstack[i].tpc,
5249 p->trapstack[i].tnpc, p->trapstack[i].tt);
5250- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5251+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5252 }
5253 }
5254
5255@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5256
5257 lvl -= 0x100;
5258 if (regs->tstate & TSTATE_PRIV) {
5259+
5260+#ifdef CONFIG_PAX_REFCOUNT
5261+ if (lvl == 6)
5262+ pax_report_refcount_overflow(regs);
5263+#endif
5264+
5265 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5266 die_if_kernel(buffer, regs);
5267 }
5268@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5269 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5270 {
5271 char buffer[32];
5272-
5273+
5274 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5275 0, lvl, SIGTRAP) == NOTIFY_STOP)
5276 return;
5277
5278+#ifdef CONFIG_PAX_REFCOUNT
5279+ if (lvl == 6)
5280+ pax_report_refcount_overflow(regs);
5281+#endif
5282+
5283 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5284
5285 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5286@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5287 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5288 printk("%s" "ERROR(%d): ",
5289 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5290- printk("TPC<%pS>\n", (void *) regs->tpc);
5291+ printk("TPC<%pA>\n", (void *) regs->tpc);
5292 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5294 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5295@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5296 smp_processor_id(),
5297 (type & 0x1) ? 'I' : 'D',
5298 regs->tpc);
5299- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5300+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5301 panic("Irrecoverable Cheetah+ parity error.");
5302 }
5303
5304@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5305 smp_processor_id(),
5306 (type & 0x1) ? 'I' : 'D',
5307 regs->tpc);
5308- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5309+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5310 }
5311
5312 struct sun4v_error_entry {
5313@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5314
5315 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5316 regs->tpc, tl);
5317- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5318+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5319 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5320- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5321+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5322 (void *) regs->u_regs[UREG_I7]);
5323 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5324 "pte[%lx] error[%lx]\n",
5325@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5326
5327 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5328 regs->tpc, tl);
5329- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5330+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5331 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5332- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5333+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5334 (void *) regs->u_regs[UREG_I7]);
5335 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5336 "pte[%lx] error[%lx]\n",
5337@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5338 fp = (unsigned long)sf->fp + STACK_BIAS;
5339 }
5340
5341- printk(" [%016lx] %pS\n", pc, (void *) pc);
5342+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5343 } while (++count < 16);
5344 }
5345
5346@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5347 return (struct reg_window *) (fp + STACK_BIAS);
5348 }
5349
5350+extern void gr_handle_kernel_exploit(void);
5351+
5352 void die_if_kernel(char *str, struct pt_regs *regs)
5353 {
5354 static int die_counter;
5355@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5356 while (rw &&
5357 count++ < 30&&
5358 is_kernel_stack(current, rw)) {
5359- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5360+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5361 (void *) rw->ins[7]);
5362
5363 rw = kernel_stack_up(rw);
5364@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5365 }
5366 user_instruction_dump ((unsigned int __user *) regs->tpc);
5367 }
5368- if (regs->tstate & TSTATE_PRIV)
5369+ if (regs->tstate & TSTATE_PRIV) {
5370+ gr_handle_kernel_exploit();
5371 do_exit(SIGKILL);
5372+ }
5373+
5374 do_exit(SIGSEGV);
5375 }
5376 EXPORT_SYMBOL(die_if_kernel);
5377diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5378index be183fe..1c8d332 100644
5379--- a/arch/sparc/kernel/una_asm_64.S
5380+++ b/arch/sparc/kernel/una_asm_64.S
5381@@ -127,7 +127,7 @@ do_int_load:
5382 wr %o5, 0x0, %asi
5383 retl
5384 mov 0, %o0
5385- .size __do_int_load, .-__do_int_load
5386+ .size do_int_load, .-do_int_load
5387
5388 .section __ex_table,"a"
5389 .word 4b, __retl_efault
5390diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5391index 3792099..2af17d8 100644
5392--- a/arch/sparc/kernel/unaligned_64.c
5393+++ b/arch/sparc/kernel/unaligned_64.c
5394@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5395 if (count < 5) {
5396 last_time = jiffies;
5397 count++;
5398- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5399+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5400 regs->tpc, (void *) regs->tpc);
5401 }
5402 }
5403diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5404index e75faf0..24f12f9 100644
5405--- a/arch/sparc/lib/Makefile
5406+++ b/arch/sparc/lib/Makefile
5407@@ -2,7 +2,7 @@
5408 #
5409
5410 asflags-y := -ansi -DST_DIV0=0x02
5411-ccflags-y := -Werror
5412+#ccflags-y := -Werror
5413
5414 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5415 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5416diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5417index 0268210..f0291ca 100644
5418--- a/arch/sparc/lib/atomic_64.S
5419+++ b/arch/sparc/lib/atomic_64.S
5420@@ -18,7 +18,12 @@
5421 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5422 BACKOFF_SETUP(%o2)
5423 1: lduw [%o1], %g1
5424- add %g1, %o0, %g7
5425+ addcc %g1, %o0, %g7
5426+
5427+#ifdef CONFIG_PAX_REFCOUNT
5428+ tvs %icc, 6
5429+#endif
5430+
5431 cas [%o1], %g1, %g7
5432 cmp %g1, %g7
5433 bne,pn %icc, 2f
5434@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5435 2: BACKOFF_SPIN(%o2, %o3, 1b)
5436 .size atomic_add, .-atomic_add
5437
5438+ .globl atomic_add_unchecked
5439+ .type atomic_add_unchecked,#function
5440+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5441+ BACKOFF_SETUP(%o2)
5442+1: lduw [%o1], %g1
5443+ add %g1, %o0, %g7
5444+ cas [%o1], %g1, %g7
5445+ cmp %g1, %g7
5446+ bne,pn %icc, 2f
5447+ nop
5448+ retl
5449+ nop
5450+2: BACKOFF_SPIN(%o2, %o3, 1b)
5451+ .size atomic_add_unchecked, .-atomic_add_unchecked
5452+
5453 .globl atomic_sub
5454 .type atomic_sub,#function
5455 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5456 BACKOFF_SETUP(%o2)
5457 1: lduw [%o1], %g1
5458- sub %g1, %o0, %g7
5459+ subcc %g1, %o0, %g7
5460+
5461+#ifdef CONFIG_PAX_REFCOUNT
5462+ tvs %icc, 6
5463+#endif
5464+
5465 cas [%o1], %g1, %g7
5466 cmp %g1, %g7
5467 bne,pn %icc, 2f
5468@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5469 2: BACKOFF_SPIN(%o2, %o3, 1b)
5470 .size atomic_sub, .-atomic_sub
5471
5472+ .globl atomic_sub_unchecked
5473+ .type atomic_sub_unchecked,#function
5474+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5475+ BACKOFF_SETUP(%o2)
5476+1: lduw [%o1], %g1
5477+ sub %g1, %o0, %g7
5478+ cas [%o1], %g1, %g7
5479+ cmp %g1, %g7
5480+ bne,pn %icc, 2f
5481+ nop
5482+ retl
5483+ nop
5484+2: BACKOFF_SPIN(%o2, %o3, 1b)
5485+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5486+
5487 .globl atomic_add_ret
5488 .type atomic_add_ret,#function
5489 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5490 BACKOFF_SETUP(%o2)
5491 1: lduw [%o1], %g1
5492- add %g1, %o0, %g7
5493+ addcc %g1, %o0, %g7
5494+
5495+#ifdef CONFIG_PAX_REFCOUNT
5496+ tvs %icc, 6
5497+#endif
5498+
5499 cas [%o1], %g1, %g7
5500 cmp %g1, %g7
5501 bne,pn %icc, 2f
5502@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5503 2: BACKOFF_SPIN(%o2, %o3, 1b)
5504 .size atomic_add_ret, .-atomic_add_ret
5505
5506+ .globl atomic_add_ret_unchecked
5507+ .type atomic_add_ret_unchecked,#function
5508+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5509+ BACKOFF_SETUP(%o2)
5510+1: lduw [%o1], %g1
5511+ addcc %g1, %o0, %g7
5512+ cas [%o1], %g1, %g7
5513+ cmp %g1, %g7
5514+ bne,pn %icc, 2f
5515+ add %g7, %o0, %g7
5516+ sra %g7, 0, %o0
5517+ retl
5518+ nop
5519+2: BACKOFF_SPIN(%o2, %o3, 1b)
5520+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5521+
5522 .globl atomic_sub_ret
5523 .type atomic_sub_ret,#function
5524 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5525 BACKOFF_SETUP(%o2)
5526 1: lduw [%o1], %g1
5527- sub %g1, %o0, %g7
5528+ subcc %g1, %o0, %g7
5529+
5530+#ifdef CONFIG_PAX_REFCOUNT
5531+ tvs %icc, 6
5532+#endif
5533+
5534 cas [%o1], %g1, %g7
5535 cmp %g1, %g7
5536 bne,pn %icc, 2f
5537@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5538 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5539 BACKOFF_SETUP(%o2)
5540 1: ldx [%o1], %g1
5541- add %g1, %o0, %g7
5542+ addcc %g1, %o0, %g7
5543+
5544+#ifdef CONFIG_PAX_REFCOUNT
5545+ tvs %xcc, 6
5546+#endif
5547+
5548 casx [%o1], %g1, %g7
5549 cmp %g1, %g7
5550 bne,pn %xcc, 2f
5551@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5552 2: BACKOFF_SPIN(%o2, %o3, 1b)
5553 .size atomic64_add, .-atomic64_add
5554
5555+ .globl atomic64_add_unchecked
5556+ .type atomic64_add_unchecked,#function
5557+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5558+ BACKOFF_SETUP(%o2)
5559+1: ldx [%o1], %g1
5560+ addcc %g1, %o0, %g7
5561+ casx [%o1], %g1, %g7
5562+ cmp %g1, %g7
5563+ bne,pn %xcc, 2f
5564+ nop
5565+ retl
5566+ nop
5567+2: BACKOFF_SPIN(%o2, %o3, 1b)
5568+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5569+
5570 .globl atomic64_sub
5571 .type atomic64_sub,#function
5572 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5573 BACKOFF_SETUP(%o2)
5574 1: ldx [%o1], %g1
5575- sub %g1, %o0, %g7
5576+ subcc %g1, %o0, %g7
5577+
5578+#ifdef CONFIG_PAX_REFCOUNT
5579+ tvs %xcc, 6
5580+#endif
5581+
5582 casx [%o1], %g1, %g7
5583 cmp %g1, %g7
5584 bne,pn %xcc, 2f
5585@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5586 2: BACKOFF_SPIN(%o2, %o3, 1b)
5587 .size atomic64_sub, .-atomic64_sub
5588
5589+ .globl atomic64_sub_unchecked
5590+ .type atomic64_sub_unchecked,#function
5591+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5592+ BACKOFF_SETUP(%o2)
5593+1: ldx [%o1], %g1
5594+ subcc %g1, %o0, %g7
5595+ casx [%o1], %g1, %g7
5596+ cmp %g1, %g7
5597+ bne,pn %xcc, 2f
5598+ nop
5599+ retl
5600+ nop
5601+2: BACKOFF_SPIN(%o2, %o3, 1b)
5602+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5603+
5604 .globl atomic64_add_ret
5605 .type atomic64_add_ret,#function
5606 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5607 BACKOFF_SETUP(%o2)
5608 1: ldx [%o1], %g1
5609- add %g1, %o0, %g7
5610+ addcc %g1, %o0, %g7
5611+
5612+#ifdef CONFIG_PAX_REFCOUNT
5613+ tvs %xcc, 6
5614+#endif
5615+
5616 casx [%o1], %g1, %g7
5617 cmp %g1, %g7
5618 bne,pn %xcc, 2f
5619@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5620 2: BACKOFF_SPIN(%o2, %o3, 1b)
5621 .size atomic64_add_ret, .-atomic64_add_ret
5622
5623+ .globl atomic64_add_ret_unchecked
5624+ .type atomic64_add_ret_unchecked,#function
5625+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5626+ BACKOFF_SETUP(%o2)
5627+1: ldx [%o1], %g1
5628+ addcc %g1, %o0, %g7
5629+ casx [%o1], %g1, %g7
5630+ cmp %g1, %g7
5631+ bne,pn %xcc, 2f
5632+ add %g7, %o0, %g7
5633+ mov %g7, %o0
5634+ retl
5635+ nop
5636+2: BACKOFF_SPIN(%o2, %o3, 1b)
5637+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5638+
5639 .globl atomic64_sub_ret
5640 .type atomic64_sub_ret,#function
5641 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5642 BACKOFF_SETUP(%o2)
5643 1: ldx [%o1], %g1
5644- sub %g1, %o0, %g7
5645+ subcc %g1, %o0, %g7
5646+
5647+#ifdef CONFIG_PAX_REFCOUNT
5648+ tvs %xcc, 6
5649+#endif
5650+
5651 casx [%o1], %g1, %g7
5652 cmp %g1, %g7
5653 bne,pn %xcc, 2f
5654diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5655index 704b126..2e79d76 100644
5656--- a/arch/sparc/lib/ksyms.c
5657+++ b/arch/sparc/lib/ksyms.c
5658@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5659
5660 /* Atomic counter implementation. */
5661 EXPORT_SYMBOL(atomic_add);
5662+EXPORT_SYMBOL(atomic_add_unchecked);
5663 EXPORT_SYMBOL(atomic_add_ret);
5664+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5665 EXPORT_SYMBOL(atomic_sub);
5666+EXPORT_SYMBOL(atomic_sub_unchecked);
5667 EXPORT_SYMBOL(atomic_sub_ret);
5668 EXPORT_SYMBOL(atomic64_add);
5669+EXPORT_SYMBOL(atomic64_add_unchecked);
5670 EXPORT_SYMBOL(atomic64_add_ret);
5671+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5672 EXPORT_SYMBOL(atomic64_sub);
5673+EXPORT_SYMBOL(atomic64_sub_unchecked);
5674 EXPORT_SYMBOL(atomic64_sub_ret);
5675
5676 /* Atomic bit operations. */
5677diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5678index 91a7d29..ce75c29 100644
5679--- a/arch/sparc/lib/rwsem_64.S
5680+++ b/arch/sparc/lib/rwsem_64.S
5681@@ -11,7 +11,12 @@
5682 .globl __down_read
5683 __down_read:
5684 1: lduw [%o0], %g1
5685- add %g1, 1, %g7
5686+ addcc %g1, 1, %g7
5687+
5688+#ifdef CONFIG_PAX_REFCOUNT
5689+ tvs %icc, 6
5690+#endif
5691+
5692 cas [%o0], %g1, %g7
5693 cmp %g1, %g7
5694 bne,pn %icc, 1b
5695@@ -33,7 +38,12 @@ __down_read:
5696 .globl __down_read_trylock
5697 __down_read_trylock:
5698 1: lduw [%o0], %g1
5699- add %g1, 1, %g7
5700+ addcc %g1, 1, %g7
5701+
5702+#ifdef CONFIG_PAX_REFCOUNT
5703+ tvs %icc, 6
5704+#endif
5705+
5706 cmp %g7, 0
5707 bl,pn %icc, 2f
5708 mov 0, %o1
5709@@ -51,7 +61,12 @@ __down_write:
5710 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5711 1:
5712 lduw [%o0], %g3
5713- add %g3, %g1, %g7
5714+ addcc %g3, %g1, %g7
5715+
5716+#ifdef CONFIG_PAX_REFCOUNT
5717+ tvs %icc, 6
5718+#endif
5719+
5720 cas [%o0], %g3, %g7
5721 cmp %g3, %g7
5722 bne,pn %icc, 1b
5723@@ -77,7 +92,12 @@ __down_write_trylock:
5724 cmp %g3, 0
5725 bne,pn %icc, 2f
5726 mov 0, %o1
5727- add %g3, %g1, %g7
5728+ addcc %g3, %g1, %g7
5729+
5730+#ifdef CONFIG_PAX_REFCOUNT
5731+ tvs %icc, 6
5732+#endif
5733+
5734 cas [%o0], %g3, %g7
5735 cmp %g3, %g7
5736 bne,pn %icc, 1b
5737@@ -90,7 +110,12 @@ __down_write_trylock:
5738 __up_read:
5739 1:
5740 lduw [%o0], %g1
5741- sub %g1, 1, %g7
5742+ subcc %g1, 1, %g7
5743+
5744+#ifdef CONFIG_PAX_REFCOUNT
5745+ tvs %icc, 6
5746+#endif
5747+
5748 cas [%o0], %g1, %g7
5749 cmp %g1, %g7
5750 bne,pn %icc, 1b
5751@@ -118,7 +143,12 @@ __up_write:
5752 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5753 1:
5754 lduw [%o0], %g3
5755- sub %g3, %g1, %g7
5756+ subcc %g3, %g1, %g7
5757+
5758+#ifdef CONFIG_PAX_REFCOUNT
5759+ tvs %icc, 6
5760+#endif
5761+
5762 cas [%o0], %g3, %g7
5763 cmp %g3, %g7
5764 bne,pn %icc, 1b
5765@@ -143,7 +173,12 @@ __downgrade_write:
5766 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5767 1:
5768 lduw [%o0], %g3
5769- sub %g3, %g1, %g7
5770+ subcc %g3, %g1, %g7
5771+
5772+#ifdef CONFIG_PAX_REFCOUNT
5773+ tvs %icc, 6
5774+#endif
5775+
5776 cas [%o0], %g3, %g7
5777 cmp %g3, %g7
5778 bne,pn %icc, 1b
5779diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5780index 79836a7..62f47a2 100644
5781--- a/arch/sparc/mm/Makefile
5782+++ b/arch/sparc/mm/Makefile
5783@@ -2,7 +2,7 @@
5784 #
5785
5786 asflags-y := -ansi
5787-ccflags-y := -Werror
5788+#ccflags-y := -Werror
5789
5790 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5791 obj-y += fault_$(BITS).o
5792diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5793index b99f81c..3453e93 100644
5794--- a/arch/sparc/mm/fault_32.c
5795+++ b/arch/sparc/mm/fault_32.c
5796@@ -21,6 +21,9 @@
5797 #include <linux/interrupt.h>
5798 #include <linux/module.h>
5799 #include <linux/kdebug.h>
5800+#include <linux/slab.h>
5801+#include <linux/pagemap.h>
5802+#include <linux/compiler.h>
5803
5804 #include <asm/system.h>
5805 #include <asm/page.h>
5806@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5807 return safe_compute_effective_address(regs, insn);
5808 }
5809
5810+#ifdef CONFIG_PAX_PAGEEXEC
5811+#ifdef CONFIG_PAX_DLRESOLVE
5812+static void pax_emuplt_close(struct vm_area_struct *vma)
5813+{
5814+ vma->vm_mm->call_dl_resolve = 0UL;
5815+}
5816+
5817+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5818+{
5819+ unsigned int *kaddr;
5820+
5821+ vmf->page = alloc_page(GFP_HIGHUSER);
5822+ if (!vmf->page)
5823+ return VM_FAULT_OOM;
5824+
5825+ kaddr = kmap(vmf->page);
5826+ memset(kaddr, 0, PAGE_SIZE);
5827+ kaddr[0] = 0x9DE3BFA8U; /* save */
5828+ flush_dcache_page(vmf->page);
5829+ kunmap(vmf->page);
5830+ return VM_FAULT_MAJOR;
5831+}
5832+
5833+static const struct vm_operations_struct pax_vm_ops = {
5834+ .close = pax_emuplt_close,
5835+ .fault = pax_emuplt_fault
5836+};
5837+
5838+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5839+{
5840+ int ret;
5841+
5842+ vma->vm_mm = current->mm;
5843+ vma->vm_start = addr;
5844+ vma->vm_end = addr + PAGE_SIZE;
5845+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5846+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5847+ vma->vm_ops = &pax_vm_ops;
5848+
5849+ ret = insert_vm_struct(current->mm, vma);
5850+ if (ret)
5851+ return ret;
5852+
5853+ ++current->mm->total_vm;
5854+ return 0;
5855+}
5856+#endif
5857+
5858+/*
5859+ * PaX: decide what to do with offenders (regs->pc = fault address)
5860+ *
5861+ * returns 1 when task should be killed
5862+ * 2 when patched PLT trampoline was detected
5863+ * 3 when unpatched PLT trampoline was detected
5864+ */
5865+static int pax_handle_fetch_fault(struct pt_regs *regs)
5866+{
5867+
5868+#ifdef CONFIG_PAX_EMUPLT
5869+ int err;
5870+
5871+ do { /* PaX: patched PLT emulation #1 */
5872+ unsigned int sethi1, sethi2, jmpl;
5873+
5874+ err = get_user(sethi1, (unsigned int *)regs->pc);
5875+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5876+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5877+
5878+ if (err)
5879+ break;
5880+
5881+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5882+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5883+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5884+ {
5885+ unsigned int addr;
5886+
5887+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5888+ addr = regs->u_regs[UREG_G1];
5889+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5890+ regs->pc = addr;
5891+ regs->npc = addr+4;
5892+ return 2;
5893+ }
5894+ } while (0);
5895+
5896+ { /* PaX: patched PLT emulation #2 */
5897+ unsigned int ba;
5898+
5899+ err = get_user(ba, (unsigned int *)regs->pc);
5900+
5901+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5902+ unsigned int addr;
5903+
5904+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5905+ regs->pc = addr;
5906+ regs->npc = addr+4;
5907+ return 2;
5908+ }
5909+ }
5910+
5911+ do { /* PaX: patched PLT emulation #3 */
5912+ unsigned int sethi, jmpl, nop;
5913+
5914+ err = get_user(sethi, (unsigned int *)regs->pc);
5915+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5916+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5917+
5918+ if (err)
5919+ break;
5920+
5921+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5922+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5923+ nop == 0x01000000U)
5924+ {
5925+ unsigned int addr;
5926+
5927+ addr = (sethi & 0x003FFFFFU) << 10;
5928+ regs->u_regs[UREG_G1] = addr;
5929+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5930+ regs->pc = addr;
5931+ regs->npc = addr+4;
5932+ return 2;
5933+ }
5934+ } while (0);
5935+
5936+ do { /* PaX: unpatched PLT emulation step 1 */
5937+ unsigned int sethi, ba, nop;
5938+
5939+ err = get_user(sethi, (unsigned int *)regs->pc);
5940+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5941+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5942+
5943+ if (err)
5944+ break;
5945+
5946+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5947+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5948+ nop == 0x01000000U)
5949+ {
5950+ unsigned int addr, save, call;
5951+
5952+ if ((ba & 0xFFC00000U) == 0x30800000U)
5953+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5954+ else
5955+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5956+
5957+ err = get_user(save, (unsigned int *)addr);
5958+ err |= get_user(call, (unsigned int *)(addr+4));
5959+ err |= get_user(nop, (unsigned int *)(addr+8));
5960+ if (err)
5961+ break;
5962+
5963+#ifdef CONFIG_PAX_DLRESOLVE
5964+ if (save == 0x9DE3BFA8U &&
5965+ (call & 0xC0000000U) == 0x40000000U &&
5966+ nop == 0x01000000U)
5967+ {
5968+ struct vm_area_struct *vma;
5969+ unsigned long call_dl_resolve;
5970+
5971+ down_read(&current->mm->mmap_sem);
5972+ call_dl_resolve = current->mm->call_dl_resolve;
5973+ up_read(&current->mm->mmap_sem);
5974+ if (likely(call_dl_resolve))
5975+ goto emulate;
5976+
5977+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5978+
5979+ down_write(&current->mm->mmap_sem);
5980+ if (current->mm->call_dl_resolve) {
5981+ call_dl_resolve = current->mm->call_dl_resolve;
5982+ up_write(&current->mm->mmap_sem);
5983+ if (vma)
5984+ kmem_cache_free(vm_area_cachep, vma);
5985+ goto emulate;
5986+ }
5987+
5988+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5989+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5990+ up_write(&current->mm->mmap_sem);
5991+ if (vma)
5992+ kmem_cache_free(vm_area_cachep, vma);
5993+ return 1;
5994+ }
5995+
5996+ if (pax_insert_vma(vma, call_dl_resolve)) {
5997+ up_write(&current->mm->mmap_sem);
5998+ kmem_cache_free(vm_area_cachep, vma);
5999+ return 1;
6000+ }
6001+
6002+ current->mm->call_dl_resolve = call_dl_resolve;
6003+ up_write(&current->mm->mmap_sem);
6004+
6005+emulate:
6006+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6007+ regs->pc = call_dl_resolve;
6008+ regs->npc = addr+4;
6009+ return 3;
6010+ }
6011+#endif
6012+
6013+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6014+ if ((save & 0xFFC00000U) == 0x05000000U &&
6015+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6016+ nop == 0x01000000U)
6017+ {
6018+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6019+ regs->u_regs[UREG_G2] = addr + 4;
6020+ addr = (save & 0x003FFFFFU) << 10;
6021+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6022+ regs->pc = addr;
6023+ regs->npc = addr+4;
6024+ return 3;
6025+ }
6026+ }
6027+ } while (0);
6028+
6029+ do { /* PaX: unpatched PLT emulation step 2 */
6030+ unsigned int save, call, nop;
6031+
6032+ err = get_user(save, (unsigned int *)(regs->pc-4));
6033+ err |= get_user(call, (unsigned int *)regs->pc);
6034+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6035+ if (err)
6036+ break;
6037+
6038+ if (save == 0x9DE3BFA8U &&
6039+ (call & 0xC0000000U) == 0x40000000U &&
6040+ nop == 0x01000000U)
6041+ {
6042+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6043+
6044+ regs->u_regs[UREG_RETPC] = regs->pc;
6045+ regs->pc = dl_resolve;
6046+ regs->npc = dl_resolve+4;
6047+ return 3;
6048+ }
6049+ } while (0);
6050+#endif
6051+
6052+ return 1;
6053+}
6054+
6055+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6056+{
6057+ unsigned long i;
6058+
6059+ printk(KERN_ERR "PAX: bytes at PC: ");
6060+ for (i = 0; i < 8; i++) {
6061+ unsigned int c;
6062+ if (get_user(c, (unsigned int *)pc+i))
6063+ printk(KERN_CONT "???????? ");
6064+ else
6065+ printk(KERN_CONT "%08x ", c);
6066+ }
6067+ printk("\n");
6068+}
6069+#endif
6070+
6071 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6072 unsigned long address)
6073 {
6074@@ -231,6 +495,24 @@ good_area:
6075 if(!(vma->vm_flags & VM_WRITE))
6076 goto bad_area;
6077 } else {
6078+
6079+#ifdef CONFIG_PAX_PAGEEXEC
6080+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6081+ up_read(&mm->mmap_sem);
6082+ switch (pax_handle_fetch_fault(regs)) {
6083+
6084+#ifdef CONFIG_PAX_EMUPLT
6085+ case 2:
6086+ case 3:
6087+ return;
6088+#endif
6089+
6090+ }
6091+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6092+ do_group_exit(SIGKILL);
6093+ }
6094+#endif
6095+
6096 /* Allow reads even for write-only mappings */
6097 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6098 goto bad_area;
6099diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6100index 43b0da9..a0b78f9 100644
6101--- a/arch/sparc/mm/fault_64.c
6102+++ b/arch/sparc/mm/fault_64.c
6103@@ -20,6 +20,9 @@
6104 #include <linux/kprobes.h>
6105 #include <linux/kdebug.h>
6106 #include <linux/percpu.h>
6107+#include <linux/slab.h>
6108+#include <linux/pagemap.h>
6109+#include <linux/compiler.h>
6110
6111 #include <asm/page.h>
6112 #include <asm/pgtable.h>
6113@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6114 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6115 regs->tpc);
6116 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6117- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6118+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6119 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6120 dump_stack();
6121 unhandled_fault(regs->tpc, current, regs);
6122@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6123 show_regs(regs);
6124 }
6125
6126+#ifdef CONFIG_PAX_PAGEEXEC
6127+#ifdef CONFIG_PAX_DLRESOLVE
6128+static void pax_emuplt_close(struct vm_area_struct *vma)
6129+{
6130+ vma->vm_mm->call_dl_resolve = 0UL;
6131+}
6132+
6133+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6134+{
6135+ unsigned int *kaddr;
6136+
6137+ vmf->page = alloc_page(GFP_HIGHUSER);
6138+ if (!vmf->page)
6139+ return VM_FAULT_OOM;
6140+
6141+ kaddr = kmap(vmf->page);
6142+ memset(kaddr, 0, PAGE_SIZE);
6143+ kaddr[0] = 0x9DE3BFA8U; /* save */
6144+ flush_dcache_page(vmf->page);
6145+ kunmap(vmf->page);
6146+ return VM_FAULT_MAJOR;
6147+}
6148+
6149+static const struct vm_operations_struct pax_vm_ops = {
6150+ .close = pax_emuplt_close,
6151+ .fault = pax_emuplt_fault
6152+};
6153+
6154+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6155+{
6156+ int ret;
6157+
6158+ vma->vm_mm = current->mm;
6159+ vma->vm_start = addr;
6160+ vma->vm_end = addr + PAGE_SIZE;
6161+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6162+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6163+ vma->vm_ops = &pax_vm_ops;
6164+
6165+ ret = insert_vm_struct(current->mm, vma);
6166+ if (ret)
6167+ return ret;
6168+
6169+ ++current->mm->total_vm;
6170+ return 0;
6171+}
6172+#endif
6173+
6174+/*
6175+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6176+ *
6177+ * returns 1 when task should be killed
6178+ * 2 when patched PLT trampoline was detected
6179+ * 3 when unpatched PLT trampoline was detected
6180+ */
6181+static int pax_handle_fetch_fault(struct pt_regs *regs)
6182+{
6183+
6184+#ifdef CONFIG_PAX_EMUPLT
6185+ int err;
6186+
6187+ do { /* PaX: patched PLT emulation #1 */
6188+ unsigned int sethi1, sethi2, jmpl;
6189+
6190+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6191+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6192+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6193+
6194+ if (err)
6195+ break;
6196+
6197+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6198+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6199+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6200+ {
6201+ unsigned long addr;
6202+
6203+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6204+ addr = regs->u_regs[UREG_G1];
6205+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6206+
6207+ if (test_thread_flag(TIF_32BIT))
6208+ addr &= 0xFFFFFFFFUL;
6209+
6210+ regs->tpc = addr;
6211+ regs->tnpc = addr+4;
6212+ return 2;
6213+ }
6214+ } while (0);
6215+
6216+ { /* PaX: patched PLT emulation #2 */
6217+ unsigned int ba;
6218+
6219+ err = get_user(ba, (unsigned int *)regs->tpc);
6220+
6221+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6222+ unsigned long addr;
6223+
6224+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6225+
6226+ if (test_thread_flag(TIF_32BIT))
6227+ addr &= 0xFFFFFFFFUL;
6228+
6229+ regs->tpc = addr;
6230+ regs->tnpc = addr+4;
6231+ return 2;
6232+ }
6233+ }
6234+
6235+ do { /* PaX: patched PLT emulation #3 */
6236+ unsigned int sethi, jmpl, nop;
6237+
6238+ err = get_user(sethi, (unsigned int *)regs->tpc);
6239+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6240+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6241+
6242+ if (err)
6243+ break;
6244+
6245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6246+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6247+ nop == 0x01000000U)
6248+ {
6249+ unsigned long addr;
6250+
6251+ addr = (sethi & 0x003FFFFFU) << 10;
6252+ regs->u_regs[UREG_G1] = addr;
6253+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6254+
6255+ if (test_thread_flag(TIF_32BIT))
6256+ addr &= 0xFFFFFFFFUL;
6257+
6258+ regs->tpc = addr;
6259+ regs->tnpc = addr+4;
6260+ return 2;
6261+ }
6262+ } while (0);
6263+
6264+ do { /* PaX: patched PLT emulation #4 */
6265+ unsigned int sethi, mov1, call, mov2;
6266+
6267+ err = get_user(sethi, (unsigned int *)regs->tpc);
6268+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6269+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6270+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6271+
6272+ if (err)
6273+ break;
6274+
6275+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6276+ mov1 == 0x8210000FU &&
6277+ (call & 0xC0000000U) == 0x40000000U &&
6278+ mov2 == 0x9E100001U)
6279+ {
6280+ unsigned long addr;
6281+
6282+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6283+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6284+
6285+ if (test_thread_flag(TIF_32BIT))
6286+ addr &= 0xFFFFFFFFUL;
6287+
6288+ regs->tpc = addr;
6289+ regs->tnpc = addr+4;
6290+ return 2;
6291+ }
6292+ } while (0);
6293+
6294+ do { /* PaX: patched PLT emulation #5 */
6295+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6296+
6297+ err = get_user(sethi, (unsigned int *)regs->tpc);
6298+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6299+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6300+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6301+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6302+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6303+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6304+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6305+
6306+ if (err)
6307+ break;
6308+
6309+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6310+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6311+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6312+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6313+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6314+ sllx == 0x83287020U &&
6315+ jmpl == 0x81C04005U &&
6316+ nop == 0x01000000U)
6317+ {
6318+ unsigned long addr;
6319+
6320+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6321+ regs->u_regs[UREG_G1] <<= 32;
6322+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6323+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6324+ regs->tpc = addr;
6325+ regs->tnpc = addr+4;
6326+ return 2;
6327+ }
6328+ } while (0);
6329+
6330+ do { /* PaX: patched PLT emulation #6 */
6331+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6332+
6333+ err = get_user(sethi, (unsigned int *)regs->tpc);
6334+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6335+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6336+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6337+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6338+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6339+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6340+
6341+ if (err)
6342+ break;
6343+
6344+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6345+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6346+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6347+ sllx == 0x83287020U &&
6348+ (or & 0xFFFFE000U) == 0x8A116000U &&
6349+ jmpl == 0x81C04005U &&
6350+ nop == 0x01000000U)
6351+ {
6352+ unsigned long addr;
6353+
6354+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6355+ regs->u_regs[UREG_G1] <<= 32;
6356+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6357+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6358+ regs->tpc = addr;
6359+ regs->tnpc = addr+4;
6360+ return 2;
6361+ }
6362+ } while (0);
6363+
6364+ do { /* PaX: unpatched PLT emulation step 1 */
6365+ unsigned int sethi, ba, nop;
6366+
6367+ err = get_user(sethi, (unsigned int *)regs->tpc);
6368+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6369+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6370+
6371+ if (err)
6372+ break;
6373+
6374+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6375+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6376+ nop == 0x01000000U)
6377+ {
6378+ unsigned long addr;
6379+ unsigned int save, call;
6380+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6381+
6382+ if ((ba & 0xFFC00000U) == 0x30800000U)
6383+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6384+ else
6385+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6386+
6387+ if (test_thread_flag(TIF_32BIT))
6388+ addr &= 0xFFFFFFFFUL;
6389+
6390+ err = get_user(save, (unsigned int *)addr);
6391+ err |= get_user(call, (unsigned int *)(addr+4));
6392+ err |= get_user(nop, (unsigned int *)(addr+8));
6393+ if (err)
6394+ break;
6395+
6396+#ifdef CONFIG_PAX_DLRESOLVE
6397+ if (save == 0x9DE3BFA8U &&
6398+ (call & 0xC0000000U) == 0x40000000U &&
6399+ nop == 0x01000000U)
6400+ {
6401+ struct vm_area_struct *vma;
6402+ unsigned long call_dl_resolve;
6403+
6404+ down_read(&current->mm->mmap_sem);
6405+ call_dl_resolve = current->mm->call_dl_resolve;
6406+ up_read(&current->mm->mmap_sem);
6407+ if (likely(call_dl_resolve))
6408+ goto emulate;
6409+
6410+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6411+
6412+ down_write(&current->mm->mmap_sem);
6413+ if (current->mm->call_dl_resolve) {
6414+ call_dl_resolve = current->mm->call_dl_resolve;
6415+ up_write(&current->mm->mmap_sem);
6416+ if (vma)
6417+ kmem_cache_free(vm_area_cachep, vma);
6418+ goto emulate;
6419+ }
6420+
6421+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6422+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6423+ up_write(&current->mm->mmap_sem);
6424+ if (vma)
6425+ kmem_cache_free(vm_area_cachep, vma);
6426+ return 1;
6427+ }
6428+
6429+ if (pax_insert_vma(vma, call_dl_resolve)) {
6430+ up_write(&current->mm->mmap_sem);
6431+ kmem_cache_free(vm_area_cachep, vma);
6432+ return 1;
6433+ }
6434+
6435+ current->mm->call_dl_resolve = call_dl_resolve;
6436+ up_write(&current->mm->mmap_sem);
6437+
6438+emulate:
6439+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6440+ regs->tpc = call_dl_resolve;
6441+ regs->tnpc = addr+4;
6442+ return 3;
6443+ }
6444+#endif
6445+
6446+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6447+ if ((save & 0xFFC00000U) == 0x05000000U &&
6448+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6449+ nop == 0x01000000U)
6450+ {
6451+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6452+ regs->u_regs[UREG_G2] = addr + 4;
6453+ addr = (save & 0x003FFFFFU) << 10;
6454+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6455+
6456+ if (test_thread_flag(TIF_32BIT))
6457+ addr &= 0xFFFFFFFFUL;
6458+
6459+ regs->tpc = addr;
6460+ regs->tnpc = addr+4;
6461+ return 3;
6462+ }
6463+
6464+ /* PaX: 64-bit PLT stub */
6465+ err = get_user(sethi1, (unsigned int *)addr);
6466+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6467+ err |= get_user(or1, (unsigned int *)(addr+8));
6468+ err |= get_user(or2, (unsigned int *)(addr+12));
6469+ err |= get_user(sllx, (unsigned int *)(addr+16));
6470+ err |= get_user(add, (unsigned int *)(addr+20));
6471+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6472+ err |= get_user(nop, (unsigned int *)(addr+28));
6473+ if (err)
6474+ break;
6475+
6476+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6477+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6478+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6479+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6480+ sllx == 0x89293020U &&
6481+ add == 0x8A010005U &&
6482+ jmpl == 0x89C14000U &&
6483+ nop == 0x01000000U)
6484+ {
6485+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6486+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6487+ regs->u_regs[UREG_G4] <<= 32;
6488+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6489+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6490+ regs->u_regs[UREG_G4] = addr + 24;
6491+ addr = regs->u_regs[UREG_G5];
6492+ regs->tpc = addr;
6493+ regs->tnpc = addr+4;
6494+ return 3;
6495+ }
6496+ }
6497+ } while (0);
6498+
6499+#ifdef CONFIG_PAX_DLRESOLVE
6500+ do { /* PaX: unpatched PLT emulation step 2 */
6501+ unsigned int save, call, nop;
6502+
6503+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6504+ err |= get_user(call, (unsigned int *)regs->tpc);
6505+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6506+ if (err)
6507+ break;
6508+
6509+ if (save == 0x9DE3BFA8U &&
6510+ (call & 0xC0000000U) == 0x40000000U &&
6511+ nop == 0x01000000U)
6512+ {
6513+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6514+
6515+ if (test_thread_flag(TIF_32BIT))
6516+ dl_resolve &= 0xFFFFFFFFUL;
6517+
6518+ regs->u_regs[UREG_RETPC] = regs->tpc;
6519+ regs->tpc = dl_resolve;
6520+ regs->tnpc = dl_resolve+4;
6521+ return 3;
6522+ }
6523+ } while (0);
6524+#endif
6525+
6526+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6527+ unsigned int sethi, ba, nop;
6528+
6529+ err = get_user(sethi, (unsigned int *)regs->tpc);
6530+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6531+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6532+
6533+ if (err)
6534+ break;
6535+
6536+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6537+ (ba & 0xFFF00000U) == 0x30600000U &&
6538+ nop == 0x01000000U)
6539+ {
6540+ unsigned long addr;
6541+
6542+ addr = (sethi & 0x003FFFFFU) << 10;
6543+ regs->u_regs[UREG_G1] = addr;
6544+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6545+
6546+ if (test_thread_flag(TIF_32BIT))
6547+ addr &= 0xFFFFFFFFUL;
6548+
6549+ regs->tpc = addr;
6550+ regs->tnpc = addr+4;
6551+ return 2;
6552+ }
6553+ } while (0);
6554+
6555+#endif
6556+
6557+ return 1;
6558+}
6559+
6560+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6561+{
6562+ unsigned long i;
6563+
6564+ printk(KERN_ERR "PAX: bytes at PC: ");
6565+ for (i = 0; i < 8; i++) {
6566+ unsigned int c;
6567+ if (get_user(c, (unsigned int *)pc+i))
6568+ printk(KERN_CONT "???????? ");
6569+ else
6570+ printk(KERN_CONT "%08x ", c);
6571+ }
6572+ printk("\n");
6573+}
6574+#endif
6575+
6576 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6577 {
6578 struct mm_struct *mm = current->mm;
6579@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6580 if (!vma)
6581 goto bad_area;
6582
6583+#ifdef CONFIG_PAX_PAGEEXEC
6584+ /* PaX: detect ITLB misses on non-exec pages */
6585+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6586+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6587+ {
6588+ if (address != regs->tpc)
6589+ goto good_area;
6590+
6591+ up_read(&mm->mmap_sem);
6592+ switch (pax_handle_fetch_fault(regs)) {
6593+
6594+#ifdef CONFIG_PAX_EMUPLT
6595+ case 2:
6596+ case 3:
6597+ return;
6598+#endif
6599+
6600+ }
6601+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6602+ do_group_exit(SIGKILL);
6603+ }
6604+#endif
6605+
6606 /* Pure DTLB misses do not tell us whether the fault causing
6607 * load/store/atomic was a write or not, it only says that there
6608 * was no match. So in such a case we (carefully) read the
6609diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6610index f27d103..1b06377 100644
6611--- a/arch/sparc/mm/hugetlbpage.c
6612+++ b/arch/sparc/mm/hugetlbpage.c
6613@@ -69,7 +69,7 @@ full_search:
6614 }
6615 return -ENOMEM;
6616 }
6617- if (likely(!vma || addr + len <= vma->vm_start)) {
6618+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6619 /*
6620 * Remember the place where we stopped the search:
6621 */
6622@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6623 /* make sure it can fit in the remaining address space */
6624 if (likely(addr > len)) {
6625 vma = find_vma(mm, addr-len);
6626- if (!vma || addr <= vma->vm_start) {
6627+ if (check_heap_stack_gap(vma, addr - len, len)) {
6628 /* remember the address as a hint for next time */
6629 return (mm->free_area_cache = addr-len);
6630 }
6631@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6632 if (unlikely(mm->mmap_base < len))
6633 goto bottomup;
6634
6635- addr = (mm->mmap_base-len) & HPAGE_MASK;
6636+ addr = mm->mmap_base - len;
6637
6638 do {
6639+ addr &= HPAGE_MASK;
6640 /*
6641 * Lookup failure means no vma is above this address,
6642 * else if new region fits below vma->vm_start,
6643 * return with success:
6644 */
6645 vma = find_vma(mm, addr);
6646- if (likely(!vma || addr+len <= vma->vm_start)) {
6647+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6648 /* remember the address as a hint for next time */
6649 return (mm->free_area_cache = addr);
6650 }
6651@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6652 mm->cached_hole_size = vma->vm_start - addr;
6653
6654 /* try just below the current vma->vm_start */
6655- addr = (vma->vm_start-len) & HPAGE_MASK;
6656- } while (likely(len < vma->vm_start));
6657+ addr = skip_heap_stack_gap(vma, len);
6658+ } while (!IS_ERR_VALUE(addr));
6659
6660 bottomup:
6661 /*
6662@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6663 if (addr) {
6664 addr = ALIGN(addr, HPAGE_SIZE);
6665 vma = find_vma(mm, addr);
6666- if (task_size - len >= addr &&
6667- (!vma || addr + len <= vma->vm_start))
6668+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6669 return addr;
6670 }
6671 if (mm->get_unmapped_area == arch_get_unmapped_area)
6672diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6673index dc7c3b1..34c0070 100644
6674--- a/arch/sparc/mm/init_32.c
6675+++ b/arch/sparc/mm/init_32.c
6676@@ -317,6 +317,9 @@ extern void device_scan(void);
6677 pgprot_t PAGE_SHARED __read_mostly;
6678 EXPORT_SYMBOL(PAGE_SHARED);
6679
6680+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6681+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6682+
6683 void __init paging_init(void)
6684 {
6685 switch(sparc_cpu_model) {
6686@@ -345,17 +348,17 @@ void __init paging_init(void)
6687
6688 /* Initialize the protection map with non-constant, MMU dependent values. */
6689 protection_map[0] = PAGE_NONE;
6690- protection_map[1] = PAGE_READONLY;
6691- protection_map[2] = PAGE_COPY;
6692- protection_map[3] = PAGE_COPY;
6693+ protection_map[1] = PAGE_READONLY_NOEXEC;
6694+ protection_map[2] = PAGE_COPY_NOEXEC;
6695+ protection_map[3] = PAGE_COPY_NOEXEC;
6696 protection_map[4] = PAGE_READONLY;
6697 protection_map[5] = PAGE_READONLY;
6698 protection_map[6] = PAGE_COPY;
6699 protection_map[7] = PAGE_COPY;
6700 protection_map[8] = PAGE_NONE;
6701- protection_map[9] = PAGE_READONLY;
6702- protection_map[10] = PAGE_SHARED;
6703- protection_map[11] = PAGE_SHARED;
6704+ protection_map[9] = PAGE_READONLY_NOEXEC;
6705+ protection_map[10] = PAGE_SHARED_NOEXEC;
6706+ protection_map[11] = PAGE_SHARED_NOEXEC;
6707 protection_map[12] = PAGE_READONLY;
6708 protection_map[13] = PAGE_READONLY;
6709 protection_map[14] = PAGE_SHARED;
6710diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6711index 509b1ff..bfd7118 100644
6712--- a/arch/sparc/mm/srmmu.c
6713+++ b/arch/sparc/mm/srmmu.c
6714@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6715 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6716 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6717 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6718+
6719+#ifdef CONFIG_PAX_PAGEEXEC
6720+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6721+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6722+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6723+#endif
6724+
6725 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6726 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6727
6728diff --git a/arch/um/Makefile b/arch/um/Makefile
6729index fc633db..5e1a1c2 100644
6730--- a/arch/um/Makefile
6731+++ b/arch/um/Makefile
6732@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6733 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6734 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6735
6736+ifdef CONSTIFY_PLUGIN
6737+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6738+endif
6739+
6740 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6741
6742 #This will adjust *FLAGS accordingly to the platform.
6743diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6744index 6c03acd..a5e0215 100644
6745--- a/arch/um/include/asm/kmap_types.h
6746+++ b/arch/um/include/asm/kmap_types.h
6747@@ -23,6 +23,7 @@ enum km_type {
6748 KM_IRQ1,
6749 KM_SOFTIRQ0,
6750 KM_SOFTIRQ1,
6751+ KM_CLEARPAGE,
6752 KM_TYPE_NR
6753 };
6754
6755diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6756index 4cc9b6c..02e5029 100644
6757--- a/arch/um/include/asm/page.h
6758+++ b/arch/um/include/asm/page.h
6759@@ -14,6 +14,9 @@
6760 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6761 #define PAGE_MASK (~(PAGE_SIZE-1))
6762
6763+#define ktla_ktva(addr) (addr)
6764+#define ktva_ktla(addr) (addr)
6765+
6766 #ifndef __ASSEMBLY__
6767
6768 struct page;
6769diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6770index 4a28a15..654dc2a 100644
6771--- a/arch/um/kernel/process.c
6772+++ b/arch/um/kernel/process.c
6773@@ -393,22 +393,6 @@ int singlestepping(void * t)
6774 return 2;
6775 }
6776
6777-/*
6778- * Only x86 and x86_64 have an arch_align_stack().
6779- * All other arches have "#define arch_align_stack(x) (x)"
6780- * in their asm/system.h
6781- * As this is included in UML from asm-um/system-generic.h,
6782- * we can use it to behave as the subarch does.
6783- */
6784-#ifndef arch_align_stack
6785-unsigned long arch_align_stack(unsigned long sp)
6786-{
6787- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6788- sp -= get_random_int() % 8192;
6789- return sp & ~0xf;
6790-}
6791-#endif
6792-
6793 unsigned long get_wchan(struct task_struct *p)
6794 {
6795 unsigned long stack_page, sp, ip;
6796diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6797index d1b93c4..ae1b7fd 100644
6798--- a/arch/um/sys-i386/shared/sysdep/system.h
6799+++ b/arch/um/sys-i386/shared/sysdep/system.h
6800@@ -17,7 +17,7 @@
6801 # define AT_VECTOR_SIZE_ARCH 1
6802 #endif
6803
6804-extern unsigned long arch_align_stack(unsigned long sp);
6805+#define arch_align_stack(x) ((x) & ~0xfUL)
6806
6807 void default_idle(void);
6808
6809diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6810index 857ca0b..9a2669d 100644
6811--- a/arch/um/sys-i386/syscalls.c
6812+++ b/arch/um/sys-i386/syscalls.c
6813@@ -11,6 +11,21 @@
6814 #include "asm/uaccess.h"
6815 #include "asm/unistd.h"
6816
6817+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6818+{
6819+ unsigned long pax_task_size = TASK_SIZE;
6820+
6821+#ifdef CONFIG_PAX_SEGMEXEC
6822+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6823+ pax_task_size = SEGMEXEC_TASK_SIZE;
6824+#endif
6825+
6826+ if (len > pax_task_size || addr > pax_task_size - len)
6827+ return -EINVAL;
6828+
6829+ return 0;
6830+}
6831+
6832 /*
6833 * Perform the select(nd, in, out, ex, tv) and mmap() system
6834 * calls. Linux/i386 didn't use to be able to handle more than
6835diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6836index d1b93c4..ae1b7fd 100644
6837--- a/arch/um/sys-x86_64/shared/sysdep/system.h
6838+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6839@@ -17,7 +17,7 @@
6840 # define AT_VECTOR_SIZE_ARCH 1
6841 #endif
6842
6843-extern unsigned long arch_align_stack(unsigned long sp);
6844+#define arch_align_stack(x) ((x) & ~0xfUL)
6845
6846 void default_idle(void);
6847
6848diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6849index 73ae02a..f932de5 100644
6850--- a/arch/x86/Kconfig
6851+++ b/arch/x86/Kconfig
6852@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6853
6854 config X86_32_LAZY_GS
6855 def_bool y
6856- depends on X86_32 && !CC_STACKPROTECTOR
6857+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6858
6859 config KTIME_SCALAR
6860 def_bool X86_32
6861@@ -1008,7 +1008,7 @@ choice
6862
6863 config NOHIGHMEM
6864 bool "off"
6865- depends on !X86_NUMAQ
6866+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6867 ---help---
6868 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6869 However, the address space of 32-bit x86 processors is only 4
6870@@ -1045,7 +1045,7 @@ config NOHIGHMEM
6871
6872 config HIGHMEM4G
6873 bool "4GB"
6874- depends on !X86_NUMAQ
6875+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6876 ---help---
6877 Select this if you have a 32-bit processor and between 1 and 4
6878 gigabytes of physical RAM.
6879@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6880 hex
6881 default 0xB0000000 if VMSPLIT_3G_OPT
6882 default 0x80000000 if VMSPLIT_2G
6883- default 0x78000000 if VMSPLIT_2G_OPT
6884+ default 0x70000000 if VMSPLIT_2G_OPT
6885 default 0x40000000 if VMSPLIT_1G
6886 default 0xC0000000
6887 depends on X86_32
6888@@ -1460,6 +1460,7 @@ config SECCOMP
6889
6890 config CC_STACKPROTECTOR
6891 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6892+ depends on X86_64 || !PAX_MEMORY_UDEREF
6893 ---help---
6894 This option turns on the -fstack-protector GCC feature. This
6895 feature puts, at the beginning of functions, a canary value on
6896@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6897 config PHYSICAL_START
6898 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6899 default "0x1000000"
6900+ range 0x400000 0x40000000
6901 ---help---
6902 This gives the physical address where the kernel is loaded.
6903
6904@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6905 hex
6906 prompt "Alignment value to which kernel should be aligned" if X86_32
6907 default "0x1000000"
6908+ range 0x400000 0x1000000 if PAX_KERNEXEC
6909 range 0x2000 0x1000000
6910 ---help---
6911 This value puts the alignment restrictions on physical address
6912@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6913 Say N if you want to disable CPU hotplug.
6914
6915 config COMPAT_VDSO
6916- def_bool y
6917+ def_bool n
6918 prompt "Compat VDSO support"
6919 depends on X86_32 || IA32_EMULATION
6920+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6921 ---help---
6922 Map the 32-bit VDSO to the predictable old-style address too.
6923 ---help---
6924diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6925index 0e566103..1a6b57e 100644
6926--- a/arch/x86/Kconfig.cpu
6927+++ b/arch/x86/Kconfig.cpu
6928@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6929
6930 config X86_F00F_BUG
6931 def_bool y
6932- depends on M586MMX || M586TSC || M586 || M486 || M386
6933+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6934
6935 config X86_WP_WORKS_OK
6936 def_bool y
6937@@ -360,7 +360,7 @@ config X86_POPAD_OK
6938
6939 config X86_ALIGNMENT_16
6940 def_bool y
6941- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6942+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6943
6944 config X86_INTEL_USERCOPY
6945 def_bool y
6946@@ -406,7 +406,7 @@ config X86_CMPXCHG64
6947 # generates cmov.
6948 config X86_CMOV
6949 def_bool y
6950- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6951+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6952
6953 config X86_MINIMUM_CPU_FAMILY
6954 int
6955diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6956index d105f29..c928727 100644
6957--- a/arch/x86/Kconfig.debug
6958+++ b/arch/x86/Kconfig.debug
6959@@ -99,7 +99,7 @@ config X86_PTDUMP
6960 config DEBUG_RODATA
6961 bool "Write protect kernel read-only data structures"
6962 default y
6963- depends on DEBUG_KERNEL
6964+ depends on DEBUG_KERNEL && BROKEN
6965 ---help---
6966 Mark the kernel read-only data as write-protected in the pagetables,
6967 in order to catch accidental (and incorrect) writes to such const
6968diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6969index d2d24c9..0f21f8d 100644
6970--- a/arch/x86/Makefile
6971+++ b/arch/x86/Makefile
6972@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6973 else
6974 BITS := 64
6975 UTS_MACHINE := x86_64
6976+ biarch := $(call cc-option,-m64)
6977 CHECKFLAGS += -D__x86_64__ -m64
6978
6979 KBUILD_AFLAGS += -m64
6980@@ -189,3 +190,12 @@ define archhelp
6981 echo ' FDARGS="..." arguments for the booted kernel'
6982 echo ' FDINITRD=file initrd for the booted kernel'
6983 endef
6984+
6985+define OLD_LD
6986+
6987+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6988+*** Please upgrade your binutils to 2.18 or newer
6989+endef
6990+
6991+archprepare:
6992+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6993diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6994index ec749c2..bbb5319 100644
6995--- a/arch/x86/boot/Makefile
6996+++ b/arch/x86/boot/Makefile
6997@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
6998 $(call cc-option, -fno-stack-protector) \
6999 $(call cc-option, -mpreferred-stack-boundary=2)
7000 KBUILD_CFLAGS += $(call cc-option, -m32)
7001+ifdef CONSTIFY_PLUGIN
7002+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7003+endif
7004 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7005 GCOV_PROFILE := n
7006
7007diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7008index 878e4b9..20537ab 100644
7009--- a/arch/x86/boot/bitops.h
7010+++ b/arch/x86/boot/bitops.h
7011@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7012 u8 v;
7013 const u32 *p = (const u32 *)addr;
7014
7015- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7016+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7017 return v;
7018 }
7019
7020@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7021
7022 static inline void set_bit(int nr, void *addr)
7023 {
7024- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7025+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7026 }
7027
7028 #endif /* BOOT_BITOPS_H */
7029diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7030index 98239d2..f40214c 100644
7031--- a/arch/x86/boot/boot.h
7032+++ b/arch/x86/boot/boot.h
7033@@ -82,7 +82,7 @@ static inline void io_delay(void)
7034 static inline u16 ds(void)
7035 {
7036 u16 seg;
7037- asm("movw %%ds,%0" : "=rm" (seg));
7038+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7039 return seg;
7040 }
7041
7042@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7043 static inline int memcmp(const void *s1, const void *s2, size_t len)
7044 {
7045 u8 diff;
7046- asm("repe; cmpsb; setnz %0"
7047+ asm volatile("repe; cmpsb; setnz %0"
7048 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7049 return diff;
7050 }
7051diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7052index f8ed065..5bf5ff3 100644
7053--- a/arch/x86/boot/compressed/Makefile
7054+++ b/arch/x86/boot/compressed/Makefile
7055@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7056 KBUILD_CFLAGS += $(cflags-y)
7057 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7058 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7059+ifdef CONSTIFY_PLUGIN
7060+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7061+endif
7062
7063 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7064 GCOV_PROFILE := n
7065diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7066index f543b70..b60fba8 100644
7067--- a/arch/x86/boot/compressed/head_32.S
7068+++ b/arch/x86/boot/compressed/head_32.S
7069@@ -76,7 +76,7 @@ ENTRY(startup_32)
7070 notl %eax
7071 andl %eax, %ebx
7072 #else
7073- movl $LOAD_PHYSICAL_ADDR, %ebx
7074+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7075 #endif
7076
7077 /* Target address to relocate to for decompression */
7078@@ -149,7 +149,7 @@ relocated:
7079 * and where it was actually loaded.
7080 */
7081 movl %ebp, %ebx
7082- subl $LOAD_PHYSICAL_ADDR, %ebx
7083+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7084 jz 2f /* Nothing to be done if loaded at compiled addr. */
7085 /*
7086 * Process relocations.
7087@@ -157,8 +157,7 @@ relocated:
7088
7089 1: subl $4, %edi
7090 movl (%edi), %ecx
7091- testl %ecx, %ecx
7092- jz 2f
7093+ jecxz 2f
7094 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7095 jmp 1b
7096 2:
7097diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7098index 077e1b6..2c6b13b 100644
7099--- a/arch/x86/boot/compressed/head_64.S
7100+++ b/arch/x86/boot/compressed/head_64.S
7101@@ -91,7 +91,7 @@ ENTRY(startup_32)
7102 notl %eax
7103 andl %eax, %ebx
7104 #else
7105- movl $LOAD_PHYSICAL_ADDR, %ebx
7106+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7107 #endif
7108
7109 /* Target address to relocate to for decompression */
7110@@ -183,7 +183,7 @@ no_longmode:
7111 hlt
7112 jmp 1b
7113
7114-#include "../../kernel/verify_cpu_64.S"
7115+#include "../../kernel/verify_cpu.S"
7116
7117 /*
7118 * Be careful here startup_64 needs to be at a predictable
7119@@ -234,7 +234,7 @@ ENTRY(startup_64)
7120 notq %rax
7121 andq %rax, %rbp
7122 #else
7123- movq $LOAD_PHYSICAL_ADDR, %rbp
7124+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7125 #endif
7126
7127 /* Target address to relocate to for decompression */
7128diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7129index 842b2a3..f00178b 100644
7130--- a/arch/x86/boot/compressed/misc.c
7131+++ b/arch/x86/boot/compressed/misc.c
7132@@ -288,7 +288,7 @@ static void parse_elf(void *output)
7133 case PT_LOAD:
7134 #ifdef CONFIG_RELOCATABLE
7135 dest = output;
7136- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7137+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7138 #else
7139 dest = (void *)(phdr->p_paddr);
7140 #endif
7141@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7142 error("Destination address too large");
7143 #endif
7144 #ifndef CONFIG_RELOCATABLE
7145- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7146+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7147 error("Wrong destination address");
7148 #endif
7149
7150diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7151index bcbd36c..b1754af 100644
7152--- a/arch/x86/boot/compressed/mkpiggy.c
7153+++ b/arch/x86/boot/compressed/mkpiggy.c
7154@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7155
7156 offs = (olen > ilen) ? olen - ilen : 0;
7157 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7158- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7159+ offs += 64*1024; /* Add 64K bytes slack */
7160 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7161
7162 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7163diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7164index bbeb0c3..f5167ab 100644
7165--- a/arch/x86/boot/compressed/relocs.c
7166+++ b/arch/x86/boot/compressed/relocs.c
7167@@ -10,8 +10,11 @@
7168 #define USE_BSD
7169 #include <endian.h>
7170
7171+#include "../../../../include/linux/autoconf.h"
7172+
7173 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7174 static Elf32_Ehdr ehdr;
7175+static Elf32_Phdr *phdr;
7176 static unsigned long reloc_count, reloc_idx;
7177 static unsigned long *relocs;
7178
7179@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7180
7181 static int is_safe_abs_reloc(const char* sym_name)
7182 {
7183- int i;
7184+ unsigned int i;
7185
7186 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7187 if (!strcmp(sym_name, safe_abs_relocs[i]))
7188@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7189 }
7190 }
7191
7192+static void read_phdrs(FILE *fp)
7193+{
7194+ unsigned int i;
7195+
7196+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7197+ if (!phdr) {
7198+ die("Unable to allocate %d program headers\n",
7199+ ehdr.e_phnum);
7200+ }
7201+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7202+ die("Seek to %d failed: %s\n",
7203+ ehdr.e_phoff, strerror(errno));
7204+ }
7205+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7206+ die("Cannot read ELF program headers: %s\n",
7207+ strerror(errno));
7208+ }
7209+ for(i = 0; i < ehdr.e_phnum; i++) {
7210+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7211+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7212+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7213+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7214+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7215+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7216+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7217+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7218+ }
7219+
7220+}
7221+
7222 static void read_shdrs(FILE *fp)
7223 {
7224- int i;
7225+ unsigned int i;
7226 Elf32_Shdr shdr;
7227
7228 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7229@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7230
7231 static void read_strtabs(FILE *fp)
7232 {
7233- int i;
7234+ unsigned int i;
7235 for (i = 0; i < ehdr.e_shnum; i++) {
7236 struct section *sec = &secs[i];
7237 if (sec->shdr.sh_type != SHT_STRTAB) {
7238@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7239
7240 static void read_symtabs(FILE *fp)
7241 {
7242- int i,j;
7243+ unsigned int i,j;
7244 for (i = 0; i < ehdr.e_shnum; i++) {
7245 struct section *sec = &secs[i];
7246 if (sec->shdr.sh_type != SHT_SYMTAB) {
7247@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7248
7249 static void read_relocs(FILE *fp)
7250 {
7251- int i,j;
7252+ unsigned int i,j;
7253+ uint32_t base;
7254+
7255 for (i = 0; i < ehdr.e_shnum; i++) {
7256 struct section *sec = &secs[i];
7257 if (sec->shdr.sh_type != SHT_REL) {
7258@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7259 die("Cannot read symbol table: %s\n",
7260 strerror(errno));
7261 }
7262+ base = 0;
7263+ for (j = 0; j < ehdr.e_phnum; j++) {
7264+ if (phdr[j].p_type != PT_LOAD )
7265+ continue;
7266+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7267+ continue;
7268+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7269+ break;
7270+ }
7271 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7272 Elf32_Rel *rel = &sec->reltab[j];
7273- rel->r_offset = elf32_to_cpu(rel->r_offset);
7274+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7275 rel->r_info = elf32_to_cpu(rel->r_info);
7276 }
7277 }
7278@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7279
7280 static void print_absolute_symbols(void)
7281 {
7282- int i;
7283+ unsigned int i;
7284 printf("Absolute symbols\n");
7285 printf(" Num: Value Size Type Bind Visibility Name\n");
7286 for (i = 0; i < ehdr.e_shnum; i++) {
7287 struct section *sec = &secs[i];
7288 char *sym_strtab;
7289 Elf32_Sym *sh_symtab;
7290- int j;
7291+ unsigned int j;
7292
7293 if (sec->shdr.sh_type != SHT_SYMTAB) {
7294 continue;
7295@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7296
7297 static void print_absolute_relocs(void)
7298 {
7299- int i, printed = 0;
7300+ unsigned int i, printed = 0;
7301
7302 for (i = 0; i < ehdr.e_shnum; i++) {
7303 struct section *sec = &secs[i];
7304 struct section *sec_applies, *sec_symtab;
7305 char *sym_strtab;
7306 Elf32_Sym *sh_symtab;
7307- int j;
7308+ unsigned int j;
7309 if (sec->shdr.sh_type != SHT_REL) {
7310 continue;
7311 }
7312@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7313
7314 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7315 {
7316- int i;
7317+ unsigned int i;
7318 /* Walk through the relocations */
7319 for (i = 0; i < ehdr.e_shnum; i++) {
7320 char *sym_strtab;
7321 Elf32_Sym *sh_symtab;
7322 struct section *sec_applies, *sec_symtab;
7323- int j;
7324+ unsigned int j;
7325 struct section *sec = &secs[i];
7326
7327 if (sec->shdr.sh_type != SHT_REL) {
7328@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7329 if (sym->st_shndx == SHN_ABS) {
7330 continue;
7331 }
7332+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7333+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7334+ continue;
7335+
7336+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7337+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7338+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7339+ continue;
7340+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7341+ continue;
7342+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7343+ continue;
7344+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7345+ continue;
7346+#endif
7347 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7348 /*
7349 * NONE can be ignored and and PC relative
7350@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7351
7352 static void emit_relocs(int as_text)
7353 {
7354- int i;
7355+ unsigned int i;
7356 /* Count how many relocations I have and allocate space for them. */
7357 reloc_count = 0;
7358 walk_relocs(count_reloc);
7359@@ -634,6 +693,7 @@ int main(int argc, char **argv)
7360 fname, strerror(errno));
7361 }
7362 read_ehdr(fp);
7363+ read_phdrs(fp);
7364 read_shdrs(fp);
7365 read_strtabs(fp);
7366 read_symtabs(fp);
7367diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7368index 4d3ff03..e4972ff 100644
7369--- a/arch/x86/boot/cpucheck.c
7370+++ b/arch/x86/boot/cpucheck.c
7371@@ -74,7 +74,7 @@ static int has_fpu(void)
7372 u16 fcw = -1, fsw = -1;
7373 u32 cr0;
7374
7375- asm("movl %%cr0,%0" : "=r" (cr0));
7376+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7377 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7378 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7379 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7380@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7381 {
7382 u32 f0, f1;
7383
7384- asm("pushfl ; "
7385+ asm volatile("pushfl ; "
7386 "pushfl ; "
7387 "popl %0 ; "
7388 "movl %0,%1 ; "
7389@@ -115,7 +115,7 @@ static void get_flags(void)
7390 set_bit(X86_FEATURE_FPU, cpu.flags);
7391
7392 if (has_eflag(X86_EFLAGS_ID)) {
7393- asm("cpuid"
7394+ asm volatile("cpuid"
7395 : "=a" (max_intel_level),
7396 "=b" (cpu_vendor[0]),
7397 "=d" (cpu_vendor[1]),
7398@@ -124,7 +124,7 @@ static void get_flags(void)
7399
7400 if (max_intel_level >= 0x00000001 &&
7401 max_intel_level <= 0x0000ffff) {
7402- asm("cpuid"
7403+ asm volatile("cpuid"
7404 : "=a" (tfms),
7405 "=c" (cpu.flags[4]),
7406 "=d" (cpu.flags[0])
7407@@ -136,7 +136,7 @@ static void get_flags(void)
7408 cpu.model += ((tfms >> 16) & 0xf) << 4;
7409 }
7410
7411- asm("cpuid"
7412+ asm volatile("cpuid"
7413 : "=a" (max_amd_level)
7414 : "a" (0x80000000)
7415 : "ebx", "ecx", "edx");
7416@@ -144,7 +144,7 @@ static void get_flags(void)
7417 if (max_amd_level >= 0x80000001 &&
7418 max_amd_level <= 0x8000ffff) {
7419 u32 eax = 0x80000001;
7420- asm("cpuid"
7421+ asm volatile("cpuid"
7422 : "+a" (eax),
7423 "=c" (cpu.flags[6]),
7424 "=d" (cpu.flags[1])
7425@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7426 u32 ecx = MSR_K7_HWCR;
7427 u32 eax, edx;
7428
7429- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7430+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7431 eax &= ~(1 << 15);
7432- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7433+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7434
7435 get_flags(); /* Make sure it really did something */
7436 err = check_flags();
7437@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7438 u32 ecx = MSR_VIA_FCR;
7439 u32 eax, edx;
7440
7441- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7442+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7443 eax |= (1<<1)|(1<<7);
7444- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7445+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7446
7447 set_bit(X86_FEATURE_CX8, cpu.flags);
7448 err = check_flags();
7449@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7450 u32 eax, edx;
7451 u32 level = 1;
7452
7453- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7454- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7455- asm("cpuid"
7456+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7457+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7458+ asm volatile("cpuid"
7459 : "+a" (level), "=d" (cpu.flags[0])
7460 : : "ecx", "ebx");
7461- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7462+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7463
7464 err = check_flags();
7465 }
7466diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7467index b31cc54..8d69237 100644
7468--- a/arch/x86/boot/header.S
7469+++ b/arch/x86/boot/header.S
7470@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7471 # single linked list of
7472 # struct setup_data
7473
7474-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7475+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7476
7477 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7478 #define VO_INIT_SIZE (VO__end - VO__text)
7479diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7480index cae3feb..ff8ff2a 100644
7481--- a/arch/x86/boot/memory.c
7482+++ b/arch/x86/boot/memory.c
7483@@ -19,7 +19,7 @@
7484
7485 static int detect_memory_e820(void)
7486 {
7487- int count = 0;
7488+ unsigned int count = 0;
7489 struct biosregs ireg, oreg;
7490 struct e820entry *desc = boot_params.e820_map;
7491 static struct e820entry buf; /* static so it is zeroed */
7492diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7493index 11e8c6e..fdbb1ed 100644
7494--- a/arch/x86/boot/video-vesa.c
7495+++ b/arch/x86/boot/video-vesa.c
7496@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7497
7498 boot_params.screen_info.vesapm_seg = oreg.es;
7499 boot_params.screen_info.vesapm_off = oreg.di;
7500+ boot_params.screen_info.vesapm_size = oreg.cx;
7501 }
7502
7503 /*
7504diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7505index d42da38..787cdf3 100644
7506--- a/arch/x86/boot/video.c
7507+++ b/arch/x86/boot/video.c
7508@@ -90,7 +90,7 @@ static void store_mode_params(void)
7509 static unsigned int get_entry(void)
7510 {
7511 char entry_buf[4];
7512- int i, len = 0;
7513+ unsigned int i, len = 0;
7514 int key;
7515 unsigned int v;
7516
7517diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7518index 5b577d5..3c1fed4 100644
7519--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7520+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7521@@ -8,6 +8,8 @@
7522 * including this sentence is retained in full.
7523 */
7524
7525+#include <asm/alternative-asm.h>
7526+
7527 .extern crypto_ft_tab
7528 .extern crypto_it_tab
7529 .extern crypto_fl_tab
7530@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7531 je B192; \
7532 leaq 32(r9),r9;
7533
7534+#define ret pax_force_retaddr 0, 1; ret
7535+
7536 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7537 movq r1,r2; \
7538 movq r3,r4; \
7539diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7540index eb0566e..e3ebad8 100644
7541--- a/arch/x86/crypto/aesni-intel_asm.S
7542+++ b/arch/x86/crypto/aesni-intel_asm.S
7543@@ -16,6 +16,7 @@
7544 */
7545
7546 #include <linux/linkage.h>
7547+#include <asm/alternative-asm.h>
7548
7549 .text
7550
7551@@ -52,6 +53,7 @@ _key_expansion_256a:
7552 pxor %xmm1, %xmm0
7553 movaps %xmm0, (%rcx)
7554 add $0x10, %rcx
7555+ pax_force_retaddr_bts
7556 ret
7557
7558 _key_expansion_192a:
7559@@ -75,6 +77,7 @@ _key_expansion_192a:
7560 shufps $0b01001110, %xmm2, %xmm1
7561 movaps %xmm1, 16(%rcx)
7562 add $0x20, %rcx
7563+ pax_force_retaddr_bts
7564 ret
7565
7566 _key_expansion_192b:
7567@@ -93,6 +96,7 @@ _key_expansion_192b:
7568
7569 movaps %xmm0, (%rcx)
7570 add $0x10, %rcx
7571+ pax_force_retaddr_bts
7572 ret
7573
7574 _key_expansion_256b:
7575@@ -104,6 +108,7 @@ _key_expansion_256b:
7576 pxor %xmm1, %xmm2
7577 movaps %xmm2, (%rcx)
7578 add $0x10, %rcx
7579+ pax_force_retaddr_bts
7580 ret
7581
7582 /*
7583@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7584 cmp %rcx, %rdi
7585 jb .Ldec_key_loop
7586 xor %rax, %rax
7587+ pax_force_retaddr 0, 1
7588 ret
7589+ENDPROC(aesni_set_key)
7590
7591 /*
7592 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7593@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7594 movups (INP), STATE # input
7595 call _aesni_enc1
7596 movups STATE, (OUTP) # output
7597+ pax_force_retaddr 0, 1
7598 ret
7599+ENDPROC(aesni_enc)
7600
7601 /*
7602 * _aesni_enc1: internal ABI
7603@@ -319,6 +328,7 @@ _aesni_enc1:
7604 movaps 0x70(TKEYP), KEY
7605 # aesenclast KEY, STATE # last round
7606 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7607+ pax_force_retaddr_bts
7608 ret
7609
7610 /*
7611@@ -482,6 +492,7 @@ _aesni_enc4:
7612 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7613 # aesenclast KEY, STATE4
7614 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7615+ pax_force_retaddr_bts
7616 ret
7617
7618 /*
7619@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7620 movups (INP), STATE # input
7621 call _aesni_dec1
7622 movups STATE, (OUTP) #output
7623+ pax_force_retaddr 0, 1
7624 ret
7625+ENDPROC(aesni_dec)
7626
7627 /*
7628 * _aesni_dec1: internal ABI
7629@@ -563,6 +576,7 @@ _aesni_dec1:
7630 movaps 0x70(TKEYP), KEY
7631 # aesdeclast KEY, STATE # last round
7632 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7633+ pax_force_retaddr_bts
7634 ret
7635
7636 /*
7637@@ -726,6 +740,7 @@ _aesni_dec4:
7638 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7639 # aesdeclast KEY, STATE4
7640 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7641+ pax_force_retaddr_bts
7642 ret
7643
7644 /*
7645@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7646 cmp $16, LEN
7647 jge .Lecb_enc_loop1
7648 .Lecb_enc_ret:
7649+ pax_force_retaddr 0, 1
7650 ret
7651+ENDPROC(aesni_ecb_enc)
7652
7653 /*
7654 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7655@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7656 cmp $16, LEN
7657 jge .Lecb_dec_loop1
7658 .Lecb_dec_ret:
7659+ pax_force_retaddr 0, 1
7660 ret
7661+ENDPROC(aesni_ecb_dec)
7662
7663 /*
7664 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7665@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7666 jge .Lcbc_enc_loop
7667 movups STATE, (IVP)
7668 .Lcbc_enc_ret:
7669+ pax_force_retaddr 0, 1
7670 ret
7671+ENDPROC(aesni_cbc_enc)
7672
7673 /*
7674 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7675@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7676 .Lcbc_dec_ret:
7677 movups IV, (IVP)
7678 .Lcbc_dec_just_ret:
7679+ pax_force_retaddr 0, 1
7680 ret
7681+ENDPROC(aesni_cbc_dec)
7682diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7683index 6214a9b..1f4fc9a 100644
7684--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7685+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7686@@ -1,3 +1,5 @@
7687+#include <asm/alternative-asm.h>
7688+
7689 # enter ECRYPT_encrypt_bytes
7690 .text
7691 .p2align 5
7692@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7693 add %r11,%rsp
7694 mov %rdi,%rax
7695 mov %rsi,%rdx
7696+ pax_force_retaddr 0, 1
7697 ret
7698 # bytesatleast65:
7699 ._bytesatleast65:
7700@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7701 add %r11,%rsp
7702 mov %rdi,%rax
7703 mov %rsi,%rdx
7704+ pax_force_retaddr
7705 ret
7706 # enter ECRYPT_ivsetup
7707 .text
7708@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7709 add %r11,%rsp
7710 mov %rdi,%rax
7711 mov %rsi,%rdx
7712+ pax_force_retaddr
7713 ret
7714diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7715index 35974a5..5662ae2 100644
7716--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7717+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7718@@ -21,6 +21,7 @@
7719 .text
7720
7721 #include <asm/asm-offsets.h>
7722+#include <asm/alternative-asm.h>
7723
7724 #define a_offset 0
7725 #define b_offset 4
7726@@ -269,6 +270,7 @@ twofish_enc_blk:
7727
7728 popq R1
7729 movq $1,%rax
7730+ pax_force_retaddr 0, 1
7731 ret
7732
7733 twofish_dec_blk:
7734@@ -321,4 +323,5 @@ twofish_dec_blk:
7735
7736 popq R1
7737 movq $1,%rax
7738+ pax_force_retaddr 0, 1
7739 ret
7740diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7741index 14531ab..a89a0c0 100644
7742--- a/arch/x86/ia32/ia32_aout.c
7743+++ b/arch/x86/ia32/ia32_aout.c
7744@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7745 unsigned long dump_start, dump_size;
7746 struct user32 dump;
7747
7748+ memset(&dump, 0, sizeof(dump));
7749+
7750 fs = get_fs();
7751 set_fs(KERNEL_DS);
7752 has_dumped = 1;
7753@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7754 dump_size = dump.u_ssize << PAGE_SHIFT;
7755 DUMP_WRITE(dump_start, dump_size);
7756 }
7757- /*
7758- * Finally dump the task struct. Not be used by gdb, but
7759- * could be useful
7760- */
7761- set_fs(KERNEL_DS);
7762- DUMP_WRITE(current, sizeof(*current));
7763 end_coredump:
7764 set_fs(fs);
7765 return has_dumped;
7766diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7767index 588a7aa..a3468b0 100644
7768--- a/arch/x86/ia32/ia32_signal.c
7769+++ b/arch/x86/ia32/ia32_signal.c
7770@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7771 }
7772 seg = get_fs();
7773 set_fs(KERNEL_DS);
7774- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7775+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7776 set_fs(seg);
7777 if (ret >= 0 && uoss_ptr) {
7778 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7779@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7780 */
7781 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7782 size_t frame_size,
7783- void **fpstate)
7784+ void __user **fpstate)
7785 {
7786 unsigned long sp;
7787
7788@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7789
7790 if (used_math()) {
7791 sp = sp - sig_xstate_ia32_size;
7792- *fpstate = (struct _fpstate_ia32 *) sp;
7793+ *fpstate = (struct _fpstate_ia32 __user *) sp;
7794 if (save_i387_xstate_ia32(*fpstate) < 0)
7795 return (void __user *) -1L;
7796 }
7797@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7798 sp -= frame_size;
7799 /* Align the stack pointer according to the i386 ABI,
7800 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7801- sp = ((sp + 4) & -16ul) - 4;
7802+ sp = ((sp - 12) & -16ul) - 4;
7803 return (void __user *) sp;
7804 }
7805
7806@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7807 * These are actually not used anymore, but left because some
7808 * gdb versions depend on them as a marker.
7809 */
7810- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7811+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7812 } put_user_catch(err);
7813
7814 if (err)
7815@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7816 0xb8,
7817 __NR_ia32_rt_sigreturn,
7818 0x80cd,
7819- 0,
7820+ 0
7821 };
7822
7823 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7824@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7825
7826 if (ka->sa.sa_flags & SA_RESTORER)
7827 restorer = ka->sa.sa_restorer;
7828+ else if (current->mm->context.vdso)
7829+ /* Return stub is in 32bit vsyscall page */
7830+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7831 else
7832- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7833- rt_sigreturn);
7834+ restorer = &frame->retcode;
7835 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7836
7837 /*
7838 * Not actually used anymore, but left because some gdb
7839 * versions need it.
7840 */
7841- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7842+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7843 } put_user_catch(err);
7844
7845 if (err)
7846diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7847index 4edd8eb..07ac7fd 100644
7848--- a/arch/x86/ia32/ia32entry.S
7849+++ b/arch/x86/ia32/ia32entry.S
7850@@ -13,7 +13,9 @@
7851 #include <asm/thread_info.h>
7852 #include <asm/segment.h>
7853 #include <asm/irqflags.h>
7854+#include <asm/pgtable.h>
7855 #include <linux/linkage.h>
7856+#include <asm/alternative-asm.h>
7857
7858 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7859 #include <linux/elf-em.h>
7860@@ -93,6 +95,30 @@ ENTRY(native_irq_enable_sysexit)
7861 ENDPROC(native_irq_enable_sysexit)
7862 #endif
7863
7864+ .macro pax_enter_kernel_user
7865+ pax_set_fptr_mask
7866+#ifdef CONFIG_PAX_MEMORY_UDEREF
7867+ call pax_enter_kernel_user
7868+#endif
7869+ .endm
7870+
7871+ .macro pax_exit_kernel_user
7872+#ifdef CONFIG_PAX_MEMORY_UDEREF
7873+ call pax_exit_kernel_user
7874+#endif
7875+#ifdef CONFIG_PAX_RANDKSTACK
7876+ pushq %rax
7877+ call pax_randomize_kstack
7878+ popq %rax
7879+#endif
7880+ .endm
7881+
7882+.macro pax_erase_kstack
7883+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7884+ call pax_erase_kstack
7885+#endif
7886+.endm
7887+
7888 /*
7889 * 32bit SYSENTER instruction entry.
7890 *
7891@@ -119,12 +145,6 @@ ENTRY(ia32_sysenter_target)
7892 CFI_REGISTER rsp,rbp
7893 SWAPGS_UNSAFE_STACK
7894 movq PER_CPU_VAR(kernel_stack), %rsp
7895- addq $(KERNEL_STACK_OFFSET),%rsp
7896- /*
7897- * No need to follow this irqs on/off section: the syscall
7898- * disabled irqs, here we enable it straight after entry:
7899- */
7900- ENABLE_INTERRUPTS(CLBR_NONE)
7901 movl %ebp,%ebp /* zero extension */
7902 pushq $__USER32_DS
7903 CFI_ADJUST_CFA_OFFSET 8
7904@@ -135,28 +155,41 @@ ENTRY(ia32_sysenter_target)
7905 pushfq
7906 CFI_ADJUST_CFA_OFFSET 8
7907 /*CFI_REL_OFFSET rflags,0*/
7908- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7909- CFI_REGISTER rip,r10
7910+ GET_THREAD_INFO(%r11)
7911+ movl TI_sysenter_return(%r11), %r11d
7912+ CFI_REGISTER rip,r11
7913 pushq $__USER32_CS
7914 CFI_ADJUST_CFA_OFFSET 8
7915 /*CFI_REL_OFFSET cs,0*/
7916 movl %eax, %eax
7917- pushq %r10
7918+ pushq %r11
7919 CFI_ADJUST_CFA_OFFSET 8
7920 CFI_REL_OFFSET rip,0
7921 pushq %rax
7922 CFI_ADJUST_CFA_OFFSET 8
7923 cld
7924 SAVE_ARGS 0,0,1
7925+ pax_enter_kernel_user
7926+ /*
7927+ * No need to follow this irqs on/off section: the syscall
7928+ * disabled irqs, here we enable it straight after entry:
7929+ */
7930+ ENABLE_INTERRUPTS(CLBR_NONE)
7931 /* no need to do an access_ok check here because rbp has been
7932 32bit zero extended */
7933+
7934+#ifdef CONFIG_PAX_MEMORY_UDEREF
7935+ mov $PAX_USER_SHADOW_BASE,%r11
7936+ add %r11,%rbp
7937+#endif
7938+
7939 1: movl (%rbp),%ebp
7940 .section __ex_table,"a"
7941 .quad 1b,ia32_badarg
7942 .previous
7943- GET_THREAD_INFO(%r10)
7944- orl $TS_COMPAT,TI_status(%r10)
7945- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7946+ GET_THREAD_INFO(%r11)
7947+ orl $TS_COMPAT,TI_status(%r11)
7948+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7949 CFI_REMEMBER_STATE
7950 jnz sysenter_tracesys
7951 cmpq $(IA32_NR_syscalls-1),%rax
7952@@ -166,13 +199,15 @@ sysenter_do_call:
7953 sysenter_dispatch:
7954 call *ia32_sys_call_table(,%rax,8)
7955 movq %rax,RAX-ARGOFFSET(%rsp)
7956- GET_THREAD_INFO(%r10)
7957+ GET_THREAD_INFO(%r11)
7958 DISABLE_INTERRUPTS(CLBR_NONE)
7959 TRACE_IRQS_OFF
7960- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7961+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7962 jnz sysexit_audit
7963 sysexit_from_sys_call:
7964- andl $~TS_COMPAT,TI_status(%r10)
7965+ pax_exit_kernel_user
7966+ pax_erase_kstack
7967+ andl $~TS_COMPAT,TI_status(%r11)
7968 /* clear IF, that popfq doesn't enable interrupts early */
7969 andl $~0x200,EFLAGS-R11(%rsp)
7970 movl RIP-R11(%rsp),%edx /* User %eip */
7971@@ -200,6 +235,9 @@ sysexit_from_sys_call:
7972 movl %eax,%esi /* 2nd arg: syscall number */
7973 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7974 call audit_syscall_entry
7975+
7976+ pax_erase_kstack
7977+
7978 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7979 cmpq $(IA32_NR_syscalls-1),%rax
7980 ja ia32_badsys
7981@@ -211,7 +249,7 @@ sysexit_from_sys_call:
7982 .endm
7983
7984 .macro auditsys_exit exit
7985- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7986+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7987 jnz ia32_ret_from_sys_call
7988 TRACE_IRQS_ON
7989 sti
7990@@ -221,12 +259,12 @@ sysexit_from_sys_call:
7991 movzbl %al,%edi /* zero-extend that into %edi */
7992 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7993 call audit_syscall_exit
7994- GET_THREAD_INFO(%r10)
7995+ GET_THREAD_INFO(%r11)
7996 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
7997 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
7998 cli
7999 TRACE_IRQS_OFF
8000- testl %edi,TI_flags(%r10)
8001+ testl %edi,TI_flags(%r11)
8002 jz \exit
8003 CLEAR_RREGS -ARGOFFSET
8004 jmp int_with_check
8005@@ -244,7 +282,7 @@ sysexit_audit:
8006
8007 sysenter_tracesys:
8008 #ifdef CONFIG_AUDITSYSCALL
8009- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8010+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8011 jz sysenter_auditsys
8012 #endif
8013 SAVE_REST
8014@@ -252,6 +290,9 @@ sysenter_tracesys:
8015 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8016 movq %rsp,%rdi /* &pt_regs -> arg1 */
8017 call syscall_trace_enter
8018+
8019+ pax_erase_kstack
8020+
8021 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8022 RESTORE_REST
8023 cmpq $(IA32_NR_syscalls-1),%rax
8024@@ -283,19 +324,20 @@ ENDPROC(ia32_sysenter_target)
8025 ENTRY(ia32_cstar_target)
8026 CFI_STARTPROC32 simple
8027 CFI_SIGNAL_FRAME
8028- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8029+ CFI_DEF_CFA rsp,0
8030 CFI_REGISTER rip,rcx
8031 /*CFI_REGISTER rflags,r11*/
8032 SWAPGS_UNSAFE_STACK
8033 movl %esp,%r8d
8034 CFI_REGISTER rsp,r8
8035 movq PER_CPU_VAR(kernel_stack),%rsp
8036+ SAVE_ARGS 8*6,1,1
8037+ pax_enter_kernel_user
8038 /*
8039 * No need to follow this irqs on/off section: the syscall
8040 * disabled irqs and here we enable it straight after entry:
8041 */
8042 ENABLE_INTERRUPTS(CLBR_NONE)
8043- SAVE_ARGS 8,1,1
8044 movl %eax,%eax /* zero extension */
8045 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8046 movq %rcx,RIP-ARGOFFSET(%rsp)
8047@@ -311,13 +353,19 @@ ENTRY(ia32_cstar_target)
8048 /* no need to do an access_ok check here because r8 has been
8049 32bit zero extended */
8050 /* hardware stack frame is complete now */
8051+
8052+#ifdef CONFIG_PAX_MEMORY_UDEREF
8053+ mov $PAX_USER_SHADOW_BASE,%r11
8054+ add %r11,%r8
8055+#endif
8056+
8057 1: movl (%r8),%r9d
8058 .section __ex_table,"a"
8059 .quad 1b,ia32_badarg
8060 .previous
8061- GET_THREAD_INFO(%r10)
8062- orl $TS_COMPAT,TI_status(%r10)
8063- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8064+ GET_THREAD_INFO(%r11)
8065+ orl $TS_COMPAT,TI_status(%r11)
8066+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8067 CFI_REMEMBER_STATE
8068 jnz cstar_tracesys
8069 cmpq $IA32_NR_syscalls-1,%rax
8070@@ -327,13 +375,15 @@ cstar_do_call:
8071 cstar_dispatch:
8072 call *ia32_sys_call_table(,%rax,8)
8073 movq %rax,RAX-ARGOFFSET(%rsp)
8074- GET_THREAD_INFO(%r10)
8075+ GET_THREAD_INFO(%r11)
8076 DISABLE_INTERRUPTS(CLBR_NONE)
8077 TRACE_IRQS_OFF
8078- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8079+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8080 jnz sysretl_audit
8081 sysretl_from_sys_call:
8082- andl $~TS_COMPAT,TI_status(%r10)
8083+ pax_exit_kernel_user
8084+ pax_erase_kstack
8085+ andl $~TS_COMPAT,TI_status(%r11)
8086 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8087 movl RIP-ARGOFFSET(%rsp),%ecx
8088 CFI_REGISTER rip,rcx
8089@@ -361,7 +411,7 @@ sysretl_audit:
8090
8091 cstar_tracesys:
8092 #ifdef CONFIG_AUDITSYSCALL
8093- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8094+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8095 jz cstar_auditsys
8096 #endif
8097 xchgl %r9d,%ebp
8098@@ -370,6 +420,9 @@ cstar_tracesys:
8099 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8100 movq %rsp,%rdi /* &pt_regs -> arg1 */
8101 call syscall_trace_enter
8102+
8103+ pax_erase_kstack
8104+
8105 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8106 RESTORE_REST
8107 xchgl %ebp,%r9d
8108@@ -415,11 +468,6 @@ ENTRY(ia32_syscall)
8109 CFI_REL_OFFSET rip,RIP-RIP
8110 PARAVIRT_ADJUST_EXCEPTION_FRAME
8111 SWAPGS
8112- /*
8113- * No need to follow this irqs on/off section: the syscall
8114- * disabled irqs and here we enable it straight after entry:
8115- */
8116- ENABLE_INTERRUPTS(CLBR_NONE)
8117 movl %eax,%eax
8118 pushq %rax
8119 CFI_ADJUST_CFA_OFFSET 8
8120@@ -427,9 +475,15 @@ ENTRY(ia32_syscall)
8121 /* note the registers are not zero extended to the sf.
8122 this could be a problem. */
8123 SAVE_ARGS 0,0,1
8124- GET_THREAD_INFO(%r10)
8125- orl $TS_COMPAT,TI_status(%r10)
8126- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8127+ pax_enter_kernel_user
8128+ /*
8129+ * No need to follow this irqs on/off section: the syscall
8130+ * disabled irqs and here we enable it straight after entry:
8131+ */
8132+ ENABLE_INTERRUPTS(CLBR_NONE)
8133+ GET_THREAD_INFO(%r11)
8134+ orl $TS_COMPAT,TI_status(%r11)
8135+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8136 jnz ia32_tracesys
8137 cmpq $(IA32_NR_syscalls-1),%rax
8138 ja ia32_badsys
8139@@ -448,6 +502,9 @@ ia32_tracesys:
8140 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8141 movq %rsp,%rdi /* &pt_regs -> arg1 */
8142 call syscall_trace_enter
8143+
8144+ pax_erase_kstack
8145+
8146 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8147 RESTORE_REST
8148 cmpq $(IA32_NR_syscalls-1),%rax
8149@@ -462,6 +519,7 @@ ia32_badsys:
8150
8151 quiet_ni_syscall:
8152 movq $-ENOSYS,%rax
8153+ pax_force_retaddr
8154 ret
8155 CFI_ENDPROC
8156
8157diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8158index 016218c..47ccbdd 100644
8159--- a/arch/x86/ia32/sys_ia32.c
8160+++ b/arch/x86/ia32/sys_ia32.c
8161@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8162 */
8163 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8164 {
8165- typeof(ubuf->st_uid) uid = 0;
8166- typeof(ubuf->st_gid) gid = 0;
8167+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8168+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8169 SET_UID(uid, stat->uid);
8170 SET_GID(gid, stat->gid);
8171 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8172@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8173 }
8174 set_fs(KERNEL_DS);
8175 ret = sys_rt_sigprocmask(how,
8176- set ? (sigset_t __user *)&s : NULL,
8177- oset ? (sigset_t __user *)&s : NULL,
8178+ set ? (sigset_t __force_user *)&s : NULL,
8179+ oset ? (sigset_t __force_user *)&s : NULL,
8180 sigsetsize);
8181 set_fs(old_fs);
8182 if (ret)
8183@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8184 mm_segment_t old_fs = get_fs();
8185
8186 set_fs(KERNEL_DS);
8187- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8188+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8189 set_fs(old_fs);
8190 if (put_compat_timespec(&t, interval))
8191 return -EFAULT;
8192@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8193 mm_segment_t old_fs = get_fs();
8194
8195 set_fs(KERNEL_DS);
8196- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8197+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8198 set_fs(old_fs);
8199 if (!ret) {
8200 switch (_NSIG_WORDS) {
8201@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8202 if (copy_siginfo_from_user32(&info, uinfo))
8203 return -EFAULT;
8204 set_fs(KERNEL_DS);
8205- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8206+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8207 set_fs(old_fs);
8208 return ret;
8209 }
8210@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8211 return -EFAULT;
8212
8213 set_fs(KERNEL_DS);
8214- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8215+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8216 count);
8217 set_fs(old_fs);
8218
8219diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8220index e2077d3..e134a5e 100644
8221--- a/arch/x86/include/asm/alternative-asm.h
8222+++ b/arch/x86/include/asm/alternative-asm.h
8223@@ -19,4 +19,43 @@
8224 .endm
8225 #endif
8226
8227+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8228+ .macro pax_force_retaddr_bts rip=0
8229+ btsq $63,\rip(%rsp)
8230+ .endm
8231+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8232+ .macro pax_force_retaddr rip=0, reload=0
8233+ btsq $63,\rip(%rsp)
8234+ .endm
8235+ .macro pax_force_fptr ptr
8236+ btsq $63,\ptr
8237+ .endm
8238+ .macro pax_set_fptr_mask
8239+ .endm
8240+#endif
8241+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8242+ .macro pax_force_retaddr rip=0, reload=0
8243+ .if \reload
8244+ pax_set_fptr_mask
8245+ .endif
8246+ orq %r10,\rip(%rsp)
8247+ .endm
8248+ .macro pax_force_fptr ptr
8249+ orq %r10,\ptr
8250+ .endm
8251+ .macro pax_set_fptr_mask
8252+ movabs $0x8000000000000000,%r10
8253+ .endm
8254+#endif
8255+#else
8256+ .macro pax_force_retaddr rip=0, reload=0
8257+ .endm
8258+ .macro pax_force_fptr ptr
8259+ .endm
8260+ .macro pax_force_retaddr_bts rip=0
8261+ .endm
8262+ .macro pax_set_fptr_mask
8263+ .endm
8264+#endif
8265+
8266 #endif /* __ASSEMBLY__ */
8267diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8268index c240efc..fdfadf3 100644
8269--- a/arch/x86/include/asm/alternative.h
8270+++ b/arch/x86/include/asm/alternative.h
8271@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8272 " .byte 662b-661b\n" /* sourcelen */ \
8273 " .byte 664f-663f\n" /* replacementlen */ \
8274 ".previous\n" \
8275- ".section .altinstr_replacement, \"ax\"\n" \
8276+ ".section .altinstr_replacement, \"a\"\n" \
8277 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8278 ".previous"
8279
8280diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8281index 474d80d..1f97d58 100644
8282--- a/arch/x86/include/asm/apic.h
8283+++ b/arch/x86/include/asm/apic.h
8284@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8285
8286 #ifdef CONFIG_X86_LOCAL_APIC
8287
8288-extern unsigned int apic_verbosity;
8289+extern int apic_verbosity;
8290 extern int local_apic_timer_c2_ok;
8291
8292 extern int disable_apic;
8293diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8294index 20370c6..a2eb9b0 100644
8295--- a/arch/x86/include/asm/apm.h
8296+++ b/arch/x86/include/asm/apm.h
8297@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8298 __asm__ __volatile__(APM_DO_ZERO_SEGS
8299 "pushl %%edi\n\t"
8300 "pushl %%ebp\n\t"
8301- "lcall *%%cs:apm_bios_entry\n\t"
8302+ "lcall *%%ss:apm_bios_entry\n\t"
8303 "setc %%al\n\t"
8304 "popl %%ebp\n\t"
8305 "popl %%edi\n\t"
8306@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8307 __asm__ __volatile__(APM_DO_ZERO_SEGS
8308 "pushl %%edi\n\t"
8309 "pushl %%ebp\n\t"
8310- "lcall *%%cs:apm_bios_entry\n\t"
8311+ "lcall *%%ss:apm_bios_entry\n\t"
8312 "setc %%bl\n\t"
8313 "popl %%ebp\n\t"
8314 "popl %%edi\n\t"
8315diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8316index dc5a667..fbed878 100644
8317--- a/arch/x86/include/asm/atomic_32.h
8318+++ b/arch/x86/include/asm/atomic_32.h
8319@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8320 }
8321
8322 /**
8323+ * atomic_read_unchecked - read atomic variable
8324+ * @v: pointer of type atomic_unchecked_t
8325+ *
8326+ * Atomically reads the value of @v.
8327+ */
8328+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8329+{
8330+ return v->counter;
8331+}
8332+
8333+/**
8334 * atomic_set - set atomic variable
8335 * @v: pointer of type atomic_t
8336 * @i: required value
8337@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8338 }
8339
8340 /**
8341+ * atomic_set_unchecked - set atomic variable
8342+ * @v: pointer of type atomic_unchecked_t
8343+ * @i: required value
8344+ *
8345+ * Atomically sets the value of @v to @i.
8346+ */
8347+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8348+{
8349+ v->counter = i;
8350+}
8351+
8352+/**
8353 * atomic_add - add integer to atomic variable
8354 * @i: integer value to add
8355 * @v: pointer of type atomic_t
8356@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8357 */
8358 static inline void atomic_add(int i, atomic_t *v)
8359 {
8360- asm volatile(LOCK_PREFIX "addl %1,%0"
8361+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8362+
8363+#ifdef CONFIG_PAX_REFCOUNT
8364+ "jno 0f\n"
8365+ LOCK_PREFIX "subl %1,%0\n"
8366+ "int $4\n0:\n"
8367+ _ASM_EXTABLE(0b, 0b)
8368+#endif
8369+
8370+ : "+m" (v->counter)
8371+ : "ir" (i));
8372+}
8373+
8374+/**
8375+ * atomic_add_unchecked - add integer to atomic variable
8376+ * @i: integer value to add
8377+ * @v: pointer of type atomic_unchecked_t
8378+ *
8379+ * Atomically adds @i to @v.
8380+ */
8381+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8382+{
8383+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8384 : "+m" (v->counter)
8385 : "ir" (i));
8386 }
8387@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8388 */
8389 static inline void atomic_sub(int i, atomic_t *v)
8390 {
8391- asm volatile(LOCK_PREFIX "subl %1,%0"
8392+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8393+
8394+#ifdef CONFIG_PAX_REFCOUNT
8395+ "jno 0f\n"
8396+ LOCK_PREFIX "addl %1,%0\n"
8397+ "int $4\n0:\n"
8398+ _ASM_EXTABLE(0b, 0b)
8399+#endif
8400+
8401+ : "+m" (v->counter)
8402+ : "ir" (i));
8403+}
8404+
8405+/**
8406+ * atomic_sub_unchecked - subtract integer from atomic variable
8407+ * @i: integer value to subtract
8408+ * @v: pointer of type atomic_unchecked_t
8409+ *
8410+ * Atomically subtracts @i from @v.
8411+ */
8412+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8413+{
8414+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8415 : "+m" (v->counter)
8416 : "ir" (i));
8417 }
8418@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8419 {
8420 unsigned char c;
8421
8422- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8423+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8424+
8425+#ifdef CONFIG_PAX_REFCOUNT
8426+ "jno 0f\n"
8427+ LOCK_PREFIX "addl %2,%0\n"
8428+ "int $4\n0:\n"
8429+ _ASM_EXTABLE(0b, 0b)
8430+#endif
8431+
8432+ "sete %1\n"
8433 : "+m" (v->counter), "=qm" (c)
8434 : "ir" (i) : "memory");
8435 return c;
8436@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8437 */
8438 static inline void atomic_inc(atomic_t *v)
8439 {
8440- asm volatile(LOCK_PREFIX "incl %0"
8441+ asm volatile(LOCK_PREFIX "incl %0\n"
8442+
8443+#ifdef CONFIG_PAX_REFCOUNT
8444+ "jno 0f\n"
8445+ LOCK_PREFIX "decl %0\n"
8446+ "int $4\n0:\n"
8447+ _ASM_EXTABLE(0b, 0b)
8448+#endif
8449+
8450+ : "+m" (v->counter));
8451+}
8452+
8453+/**
8454+ * atomic_inc_unchecked - increment atomic variable
8455+ * @v: pointer of type atomic_unchecked_t
8456+ *
8457+ * Atomically increments @v by 1.
8458+ */
8459+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8460+{
8461+ asm volatile(LOCK_PREFIX "incl %0\n"
8462 : "+m" (v->counter));
8463 }
8464
8465@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8466 */
8467 static inline void atomic_dec(atomic_t *v)
8468 {
8469- asm volatile(LOCK_PREFIX "decl %0"
8470+ asm volatile(LOCK_PREFIX "decl %0\n"
8471+
8472+#ifdef CONFIG_PAX_REFCOUNT
8473+ "jno 0f\n"
8474+ LOCK_PREFIX "incl %0\n"
8475+ "int $4\n0:\n"
8476+ _ASM_EXTABLE(0b, 0b)
8477+#endif
8478+
8479+ : "+m" (v->counter));
8480+}
8481+
8482+/**
8483+ * atomic_dec_unchecked - decrement atomic variable
8484+ * @v: pointer of type atomic_unchecked_t
8485+ *
8486+ * Atomically decrements @v by 1.
8487+ */
8488+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8489+{
8490+ asm volatile(LOCK_PREFIX "decl %0\n"
8491 : "+m" (v->counter));
8492 }
8493
8494@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8495 {
8496 unsigned char c;
8497
8498- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8499+ asm volatile(LOCK_PREFIX "decl %0\n"
8500+
8501+#ifdef CONFIG_PAX_REFCOUNT
8502+ "jno 0f\n"
8503+ LOCK_PREFIX "incl %0\n"
8504+ "int $4\n0:\n"
8505+ _ASM_EXTABLE(0b, 0b)
8506+#endif
8507+
8508+ "sete %1\n"
8509 : "+m" (v->counter), "=qm" (c)
8510 : : "memory");
8511 return c != 0;
8512@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8513 {
8514 unsigned char c;
8515
8516- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8517+ asm volatile(LOCK_PREFIX "incl %0\n"
8518+
8519+#ifdef CONFIG_PAX_REFCOUNT
8520+ "jno 0f\n"
8521+ LOCK_PREFIX "decl %0\n"
8522+ "into\n0:\n"
8523+ _ASM_EXTABLE(0b, 0b)
8524+#endif
8525+
8526+ "sete %1\n"
8527+ : "+m" (v->counter), "=qm" (c)
8528+ : : "memory");
8529+ return c != 0;
8530+}
8531+
8532+/**
8533+ * atomic_inc_and_test_unchecked - increment and test
8534+ * @v: pointer of type atomic_unchecked_t
8535+ *
8536+ * Atomically increments @v by 1
8537+ * and returns true if the result is zero, or false for all
8538+ * other cases.
8539+ */
8540+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8541+{
8542+ unsigned char c;
8543+
8544+ asm volatile(LOCK_PREFIX "incl %0\n"
8545+ "sete %1\n"
8546 : "+m" (v->counter), "=qm" (c)
8547 : : "memory");
8548 return c != 0;
8549@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8550 {
8551 unsigned char c;
8552
8553- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8554+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8555+
8556+#ifdef CONFIG_PAX_REFCOUNT
8557+ "jno 0f\n"
8558+ LOCK_PREFIX "subl %2,%0\n"
8559+ "int $4\n0:\n"
8560+ _ASM_EXTABLE(0b, 0b)
8561+#endif
8562+
8563+ "sets %1\n"
8564 : "+m" (v->counter), "=qm" (c)
8565 : "ir" (i) : "memory");
8566 return c;
8567@@ -179,6 +341,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
8568 #endif
8569 /* Modern 486+ processor */
8570 __i = i;
8571+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8572+
8573+#ifdef CONFIG_PAX_REFCOUNT
8574+ "jno 0f\n"
8575+ "movl %0, %1\n"
8576+ "int $4\n0:\n"
8577+ _ASM_EXTABLE(0b, 0b)
8578+#endif
8579+
8580+ : "+r" (i), "+m" (v->counter)
8581+ : : "memory");
8582+ return i + __i;
8583+
8584+#ifdef CONFIG_M386
8585+no_xadd: /* Legacy 386 processor */
8586+ local_irq_save(flags);
8587+ __i = atomic_read(v);
8588+ atomic_set(v, i + __i);
8589+ local_irq_restore(flags);
8590+ return i + __i;
8591+#endif
8592+}
8593+
8594+/**
8595+ * atomic_add_return_unchecked - add integer and return
8596+ * @v: pointer of type atomic_unchecked_t
8597+ * @i: integer value to add
8598+ *
8599+ * Atomically adds @i to @v and returns @i + @v
8600+ */
8601+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8602+{
8603+ int __i;
8604+#ifdef CONFIG_M386
8605+ unsigned long flags;
8606+ if (unlikely(boot_cpu_data.x86 <= 3))
8607+ goto no_xadd;
8608+#endif
8609+ /* Modern 486+ processor */
8610+ __i = i;
8611 asm volatile(LOCK_PREFIX "xaddl %0, %1"
8612 : "+r" (i), "+m" (v->counter)
8613 : : "memory");
8614@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8615 return cmpxchg(&v->counter, old, new);
8616 }
8617
8618+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8619+{
8620+ return cmpxchg(&v->counter, old, new);
8621+}
8622+
8623 static inline int atomic_xchg(atomic_t *v, int new)
8624 {
8625 return xchg(&v->counter, new);
8626 }
8627
8628+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8629+{
8630+ return xchg(&v->counter, new);
8631+}
8632+
8633 /**
8634 * atomic_add_unless - add unless the number is already a given value
8635 * @v: pointer of type atomic_t
8636@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8637 */
8638 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8639 {
8640- int c, old;
8641+ int c, old, new;
8642 c = atomic_read(v);
8643 for (;;) {
8644- if (unlikely(c == (u)))
8645+ if (unlikely(c == u))
8646 break;
8647- old = atomic_cmpxchg((v), c, c + (a));
8648+
8649+ asm volatile("addl %2,%0\n"
8650+
8651+#ifdef CONFIG_PAX_REFCOUNT
8652+ "jno 0f\n"
8653+ "subl %2,%0\n"
8654+ "int $4\n0:\n"
8655+ _ASM_EXTABLE(0b, 0b)
8656+#endif
8657+
8658+ : "=r" (new)
8659+ : "0" (c), "ir" (a));
8660+
8661+ old = atomic_cmpxchg(v, c, new);
8662 if (likely(old == c))
8663 break;
8664 c = old;
8665 }
8666- return c != (u);
8667+ return c != u;
8668 }
8669
8670 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8671
8672 #define atomic_inc_return(v) (atomic_add_return(1, v))
8673+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8674+{
8675+ return atomic_add_return_unchecked(1, v);
8676+}
8677 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8678
8679 /* These are x86-specific, used by some header files */
8680@@ -266,9 +495,18 @@ typedef struct {
8681 u64 __aligned(8) counter;
8682 } atomic64_t;
8683
8684+#ifdef CONFIG_PAX_REFCOUNT
8685+typedef struct {
8686+ u64 __aligned(8) counter;
8687+} atomic64_unchecked_t;
8688+#else
8689+typedef atomic64_t atomic64_unchecked_t;
8690+#endif
8691+
8692 #define ATOMIC64_INIT(val) { (val) }
8693
8694 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8695+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8696
8697 /**
8698 * atomic64_xchg - xchg atomic64 variable
8699@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8700 * the old value.
8701 */
8702 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8703+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8704
8705 /**
8706 * atomic64_set - set atomic64 variable
8707@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8708 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8709
8710 /**
8711+ * atomic64_unchecked_set - set atomic64 variable
8712+ * @ptr: pointer to type atomic64_unchecked_t
8713+ * @new_val: value to assign
8714+ *
8715+ * Atomically sets the value of @ptr to @new_val.
8716+ */
8717+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8718+
8719+/**
8720 * atomic64_read - read atomic64 variable
8721 * @ptr: pointer to type atomic64_t
8722 *
8723@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8724 return res;
8725 }
8726
8727-extern u64 atomic64_read(atomic64_t *ptr);
8728+/**
8729+ * atomic64_read_unchecked - read atomic64 variable
8730+ * @ptr: pointer to type atomic64_unchecked_t
8731+ *
8732+ * Atomically reads the value of @ptr and returns it.
8733+ */
8734+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8735+{
8736+ u64 res;
8737+
8738+ /*
8739+ * Note, we inline this atomic64_unchecked_t primitive because
8740+ * it only clobbers EAX/EDX and leaves the others
8741+ * untouched. We also (somewhat subtly) rely on the
8742+ * fact that cmpxchg8b returns the current 64-bit value
8743+ * of the memory location we are touching:
8744+ */
8745+ asm volatile(
8746+ "mov %%ebx, %%eax\n\t"
8747+ "mov %%ecx, %%edx\n\t"
8748+ LOCK_PREFIX "cmpxchg8b %1\n"
8749+ : "=&A" (res)
8750+ : "m" (*ptr)
8751+ );
8752+
8753+ return res;
8754+}
8755
8756 /**
8757 * atomic64_add_return - add and return
8758@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8759 * Other variants with different arithmetic operators:
8760 */
8761 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8762+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8763 extern u64 atomic64_inc_return(atomic64_t *ptr);
8764+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8765 extern u64 atomic64_dec_return(atomic64_t *ptr);
8766+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8767
8768 /**
8769 * atomic64_add - add integer to atomic64 variable
8770@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8771 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8772
8773 /**
8774+ * atomic64_add_unchecked - add integer to atomic64 variable
8775+ * @delta: integer value to add
8776+ * @ptr: pointer to type atomic64_unchecked_t
8777+ *
8778+ * Atomically adds @delta to @ptr.
8779+ */
8780+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8781+
8782+/**
8783 * atomic64_sub - subtract the atomic64 variable
8784 * @delta: integer value to subtract
8785 * @ptr: pointer to type atomic64_t
8786@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8787 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8788
8789 /**
8790+ * atomic64_sub_unchecked - subtract the atomic64 variable
8791+ * @delta: integer value to subtract
8792+ * @ptr: pointer to type atomic64_unchecked_t
8793+ *
8794+ * Atomically subtracts @delta from @ptr.
8795+ */
8796+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8797+
8798+/**
8799 * atomic64_sub_and_test - subtract value from variable and test result
8800 * @delta: integer value to subtract
8801 * @ptr: pointer to type atomic64_t
8802@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8803 extern void atomic64_inc(atomic64_t *ptr);
8804
8805 /**
8806+ * atomic64_inc_unchecked - increment atomic64 variable
8807+ * @ptr: pointer to type atomic64_unchecked_t
8808+ *
8809+ * Atomically increments @ptr by 1.
8810+ */
8811+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8812+
8813+/**
8814 * atomic64_dec - decrement atomic64 variable
8815 * @ptr: pointer to type atomic64_t
8816 *
8817@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8818 extern void atomic64_dec(atomic64_t *ptr);
8819
8820 /**
8821+ * atomic64_dec_unchecked - decrement atomic64 variable
8822+ * @ptr: pointer to type atomic64_unchecked_t
8823+ *
8824+ * Atomically decrements @ptr by 1.
8825+ */
8826+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8827+
8828+/**
8829 * atomic64_dec_and_test - decrement and test
8830 * @ptr: pointer to type atomic64_t
8831 *
8832diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8833index d605dc2..fafd7bd 100644
8834--- a/arch/x86/include/asm/atomic_64.h
8835+++ b/arch/x86/include/asm/atomic_64.h
8836@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8837 }
8838
8839 /**
8840+ * atomic_read_unchecked - read atomic variable
8841+ * @v: pointer of type atomic_unchecked_t
8842+ *
8843+ * Atomically reads the value of @v.
8844+ */
8845+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8846+{
8847+ return v->counter;
8848+}
8849+
8850+/**
8851 * atomic_set - set atomic variable
8852 * @v: pointer of type atomic_t
8853 * @i: required value
8854@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8855 }
8856
8857 /**
8858+ * atomic_set_unchecked - set atomic variable
8859+ * @v: pointer of type atomic_unchecked_t
8860+ * @i: required value
8861+ *
8862+ * Atomically sets the value of @v to @i.
8863+ */
8864+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8865+{
8866+ v->counter = i;
8867+}
8868+
8869+/**
8870 * atomic_add - add integer to atomic variable
8871 * @i: integer value to add
8872 * @v: pointer of type atomic_t
8873@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8874 */
8875 static inline void atomic_add(int i, atomic_t *v)
8876 {
8877- asm volatile(LOCK_PREFIX "addl %1,%0"
8878+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8879+
8880+#ifdef CONFIG_PAX_REFCOUNT
8881+ "jno 0f\n"
8882+ LOCK_PREFIX "subl %1,%0\n"
8883+ "int $4\n0:\n"
8884+ _ASM_EXTABLE(0b, 0b)
8885+#endif
8886+
8887+ : "=m" (v->counter)
8888+ : "ir" (i), "m" (v->counter));
8889+}
8890+
8891+/**
8892+ * atomic_add_unchecked - add integer to atomic variable
8893+ * @i: integer value to add
8894+ * @v: pointer of type atomic_unchecked_t
8895+ *
8896+ * Atomically adds @i to @v.
8897+ */
8898+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8899+{
8900+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8901 : "=m" (v->counter)
8902 : "ir" (i), "m" (v->counter));
8903 }
8904@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8905 */
8906 static inline void atomic_sub(int i, atomic_t *v)
8907 {
8908- asm volatile(LOCK_PREFIX "subl %1,%0"
8909+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8910+
8911+#ifdef CONFIG_PAX_REFCOUNT
8912+ "jno 0f\n"
8913+ LOCK_PREFIX "addl %1,%0\n"
8914+ "int $4\n0:\n"
8915+ _ASM_EXTABLE(0b, 0b)
8916+#endif
8917+
8918+ : "=m" (v->counter)
8919+ : "ir" (i), "m" (v->counter));
8920+}
8921+
8922+/**
8923+ * atomic_sub_unchecked - subtract the atomic variable
8924+ * @i: integer value to subtract
8925+ * @v: pointer of type atomic_unchecked_t
8926+ *
8927+ * Atomically subtracts @i from @v.
8928+ */
8929+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8930+{
8931+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8932 : "=m" (v->counter)
8933 : "ir" (i), "m" (v->counter));
8934 }
8935@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8936 {
8937 unsigned char c;
8938
8939- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8940+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8941+
8942+#ifdef CONFIG_PAX_REFCOUNT
8943+ "jno 0f\n"
8944+ LOCK_PREFIX "addl %2,%0\n"
8945+ "int $4\n0:\n"
8946+ _ASM_EXTABLE(0b, 0b)
8947+#endif
8948+
8949+ "sete %1\n"
8950 : "=m" (v->counter), "=qm" (c)
8951 : "ir" (i), "m" (v->counter) : "memory");
8952 return c;
8953@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8954 */
8955 static inline void atomic_inc(atomic_t *v)
8956 {
8957- asm volatile(LOCK_PREFIX "incl %0"
8958+ asm volatile(LOCK_PREFIX "incl %0\n"
8959+
8960+#ifdef CONFIG_PAX_REFCOUNT
8961+ "jno 0f\n"
8962+ LOCK_PREFIX "decl %0\n"
8963+ "int $4\n0:\n"
8964+ _ASM_EXTABLE(0b, 0b)
8965+#endif
8966+
8967+ : "=m" (v->counter)
8968+ : "m" (v->counter));
8969+}
8970+
8971+/**
8972+ * atomic_inc_unchecked - increment atomic variable
8973+ * @v: pointer of type atomic_unchecked_t
8974+ *
8975+ * Atomically increments @v by 1.
8976+ */
8977+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8978+{
8979+ asm volatile(LOCK_PREFIX "incl %0\n"
8980 : "=m" (v->counter)
8981 : "m" (v->counter));
8982 }
8983@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
8984 */
8985 static inline void atomic_dec(atomic_t *v)
8986 {
8987- asm volatile(LOCK_PREFIX "decl %0"
8988+ asm volatile(LOCK_PREFIX "decl %0\n"
8989+
8990+#ifdef CONFIG_PAX_REFCOUNT
8991+ "jno 0f\n"
8992+ LOCK_PREFIX "incl %0\n"
8993+ "int $4\n0:\n"
8994+ _ASM_EXTABLE(0b, 0b)
8995+#endif
8996+
8997+ : "=m" (v->counter)
8998+ : "m" (v->counter));
8999+}
9000+
9001+/**
9002+ * atomic_dec_unchecked - decrement atomic variable
9003+ * @v: pointer of type atomic_unchecked_t
9004+ *
9005+ * Atomically decrements @v by 1.
9006+ */
9007+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9008+{
9009+ asm volatile(LOCK_PREFIX "decl %0\n"
9010 : "=m" (v->counter)
9011 : "m" (v->counter));
9012 }
9013@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9014 {
9015 unsigned char c;
9016
9017- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9018+ asm volatile(LOCK_PREFIX "decl %0\n"
9019+
9020+#ifdef CONFIG_PAX_REFCOUNT
9021+ "jno 0f\n"
9022+ LOCK_PREFIX "incl %0\n"
9023+ "int $4\n0:\n"
9024+ _ASM_EXTABLE(0b, 0b)
9025+#endif
9026+
9027+ "sete %1\n"
9028 : "=m" (v->counter), "=qm" (c)
9029 : "m" (v->counter) : "memory");
9030 return c != 0;
9031@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9032 {
9033 unsigned char c;
9034
9035- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9036+ asm volatile(LOCK_PREFIX "incl %0\n"
9037+
9038+#ifdef CONFIG_PAX_REFCOUNT
9039+ "jno 0f\n"
9040+ LOCK_PREFIX "decl %0\n"
9041+ "int $4\n0:\n"
9042+ _ASM_EXTABLE(0b, 0b)
9043+#endif
9044+
9045+ "sete %1\n"
9046+ : "=m" (v->counter), "=qm" (c)
9047+ : "m" (v->counter) : "memory");
9048+ return c != 0;
9049+}
9050+
9051+/**
9052+ * atomic_inc_and_test_unchecked - increment and test
9053+ * @v: pointer of type atomic_unchecked_t
9054+ *
9055+ * Atomically increments @v by 1
9056+ * and returns true if the result is zero, or false for all
9057+ * other cases.
9058+ */
9059+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9060+{
9061+ unsigned char c;
9062+
9063+ asm volatile(LOCK_PREFIX "incl %0\n"
9064+ "sete %1\n"
9065 : "=m" (v->counter), "=qm" (c)
9066 : "m" (v->counter) : "memory");
9067 return c != 0;
9068@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9069 {
9070 unsigned char c;
9071
9072- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9073+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9074+
9075+#ifdef CONFIG_PAX_REFCOUNT
9076+ "jno 0f\n"
9077+ LOCK_PREFIX "subl %2,%0\n"
9078+ "int $4\n0:\n"
9079+ _ASM_EXTABLE(0b, 0b)
9080+#endif
9081+
9082+ "sets %1\n"
9083 : "=m" (v->counter), "=qm" (c)
9084 : "ir" (i), "m" (v->counter) : "memory");
9085 return c;
9086@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9087 static inline int atomic_add_return(int i, atomic_t *v)
9088 {
9089 int __i = i;
9090- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9091+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9092+
9093+#ifdef CONFIG_PAX_REFCOUNT
9094+ "jno 0f\n"
9095+ "movl %0, %1\n"
9096+ "int $4\n0:\n"
9097+ _ASM_EXTABLE(0b, 0b)
9098+#endif
9099+
9100+ : "+r" (i), "+m" (v->counter)
9101+ : : "memory");
9102+ return i + __i;
9103+}
9104+
9105+/**
9106+ * atomic_add_return_unchecked - add and return
9107+ * @i: integer value to add
9108+ * @v: pointer of type atomic_unchecked_t
9109+ *
9110+ * Atomically adds @i to @v and returns @i + @v
9111+ */
9112+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9113+{
9114+ int __i = i;
9115+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9116 : "+r" (i), "+m" (v->counter)
9117 : : "memory");
9118 return i + __i;
9119@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9120 }
9121
9122 #define atomic_inc_return(v) (atomic_add_return(1, v))
9123+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9124+{
9125+ return atomic_add_return_unchecked(1, v);
9126+}
9127 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9128
9129 /* The 64-bit atomic type */
9130@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9131 }
9132
9133 /**
9134+ * atomic64_read_unchecked - read atomic64 variable
9135+ * @v: pointer of type atomic64_unchecked_t
9136+ *
9137+ * Atomically reads the value of @v.
9138+ * Doesn't imply a read memory barrier.
9139+ */
9140+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9141+{
9142+ return v->counter;
9143+}
9144+
9145+/**
9146 * atomic64_set - set atomic64 variable
9147 * @v: pointer to type atomic64_t
9148 * @i: required value
9149@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9150 }
9151
9152 /**
9153+ * atomic64_set_unchecked - set atomic64 variable
9154+ * @v: pointer to type atomic64_unchecked_t
9155+ * @i: required value
9156+ *
9157+ * Atomically sets the value of @v to @i.
9158+ */
9159+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9160+{
9161+ v->counter = i;
9162+}
9163+
9164+/**
9165 * atomic64_add - add integer to atomic64 variable
9166 * @i: integer value to add
9167 * @v: pointer to type atomic64_t
9168@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9169 */
9170 static inline void atomic64_add(long i, atomic64_t *v)
9171 {
9172+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9173+
9174+#ifdef CONFIG_PAX_REFCOUNT
9175+ "jno 0f\n"
9176+ LOCK_PREFIX "subq %1,%0\n"
9177+ "int $4\n0:\n"
9178+ _ASM_EXTABLE(0b, 0b)
9179+#endif
9180+
9181+ : "=m" (v->counter)
9182+ : "er" (i), "m" (v->counter));
9183+}
9184+
9185+/**
9186+ * atomic64_add_unchecked - add integer to atomic64 variable
9187+ * @i: integer value to add
9188+ * @v: pointer to type atomic64_unchecked_t
9189+ *
9190+ * Atomically adds @i to @v.
9191+ */
9192+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9193+{
9194 asm volatile(LOCK_PREFIX "addq %1,%0"
9195 : "=m" (v->counter)
9196 : "er" (i), "m" (v->counter));
9197@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9198 */
9199 static inline void atomic64_sub(long i, atomic64_t *v)
9200 {
9201- asm volatile(LOCK_PREFIX "subq %1,%0"
9202+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9203+
9204+#ifdef CONFIG_PAX_REFCOUNT
9205+ "jno 0f\n"
9206+ LOCK_PREFIX "addq %1,%0\n"
9207+ "int $4\n0:\n"
9208+ _ASM_EXTABLE(0b, 0b)
9209+#endif
9210+
9211 : "=m" (v->counter)
9212 : "er" (i), "m" (v->counter));
9213 }
9214@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9215 {
9216 unsigned char c;
9217
9218- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9219+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9220+
9221+#ifdef CONFIG_PAX_REFCOUNT
9222+ "jno 0f\n"
9223+ LOCK_PREFIX "addq %2,%0\n"
9224+ "int $4\n0:\n"
9225+ _ASM_EXTABLE(0b, 0b)
9226+#endif
9227+
9228+ "sete %1\n"
9229 : "=m" (v->counter), "=qm" (c)
9230 : "er" (i), "m" (v->counter) : "memory");
9231 return c;
9232@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9233 */
9234 static inline void atomic64_inc(atomic64_t *v)
9235 {
9236+ asm volatile(LOCK_PREFIX "incq %0\n"
9237+
9238+#ifdef CONFIG_PAX_REFCOUNT
9239+ "jno 0f\n"
9240+ LOCK_PREFIX "decq %0\n"
9241+ "int $4\n0:\n"
9242+ _ASM_EXTABLE(0b, 0b)
9243+#endif
9244+
9245+ : "=m" (v->counter)
9246+ : "m" (v->counter));
9247+}
9248+
9249+/**
9250+ * atomic64_inc_unchecked - increment atomic64 variable
9251+ * @v: pointer to type atomic64_unchecked_t
9252+ *
9253+ * Atomically increments @v by 1.
9254+ */
9255+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9256+{
9257 asm volatile(LOCK_PREFIX "incq %0"
9258 : "=m" (v->counter)
9259 : "m" (v->counter));
9260@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9261 */
9262 static inline void atomic64_dec(atomic64_t *v)
9263 {
9264- asm volatile(LOCK_PREFIX "decq %0"
9265+ asm volatile(LOCK_PREFIX "decq %0\n"
9266+
9267+#ifdef CONFIG_PAX_REFCOUNT
9268+ "jno 0f\n"
9269+ LOCK_PREFIX "incq %0\n"
9270+ "int $4\n0:\n"
9271+ _ASM_EXTABLE(0b, 0b)
9272+#endif
9273+
9274+ : "=m" (v->counter)
9275+ : "m" (v->counter));
9276+}
9277+
9278+/**
9279+ * atomic64_dec_unchecked - decrement atomic64 variable
9280+ * @v: pointer to type atomic64_t
9281+ *
9282+ * Atomically decrements @v by 1.
9283+ */
9284+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9285+{
9286+ asm volatile(LOCK_PREFIX "decq %0\n"
9287 : "=m" (v->counter)
9288 : "m" (v->counter));
9289 }
9290@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9291 {
9292 unsigned char c;
9293
9294- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9295+ asm volatile(LOCK_PREFIX "decq %0\n"
9296+
9297+#ifdef CONFIG_PAX_REFCOUNT
9298+ "jno 0f\n"
9299+ LOCK_PREFIX "incq %0\n"
9300+ "int $4\n0:\n"
9301+ _ASM_EXTABLE(0b, 0b)
9302+#endif
9303+
9304+ "sete %1\n"
9305 : "=m" (v->counter), "=qm" (c)
9306 : "m" (v->counter) : "memory");
9307 return c != 0;
9308@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9309 {
9310 unsigned char c;
9311
9312- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9313+ asm volatile(LOCK_PREFIX "incq %0\n"
9314+
9315+#ifdef CONFIG_PAX_REFCOUNT
9316+ "jno 0f\n"
9317+ LOCK_PREFIX "decq %0\n"
9318+ "int $4\n0:\n"
9319+ _ASM_EXTABLE(0b, 0b)
9320+#endif
9321+
9322+ "sete %1\n"
9323 : "=m" (v->counter), "=qm" (c)
9324 : "m" (v->counter) : "memory");
9325 return c != 0;
9326@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9327 {
9328 unsigned char c;
9329
9330- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9331+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9332+
9333+#ifdef CONFIG_PAX_REFCOUNT
9334+ "jno 0f\n"
9335+ LOCK_PREFIX "subq %2,%0\n"
9336+ "int $4\n0:\n"
9337+ _ASM_EXTABLE(0b, 0b)
9338+#endif
9339+
9340+ "sets %1\n"
9341 : "=m" (v->counter), "=qm" (c)
9342 : "er" (i), "m" (v->counter) : "memory");
9343 return c;
9344@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9345 static inline long atomic64_add_return(long i, atomic64_t *v)
9346 {
9347 long __i = i;
9348- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9349+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9350+
9351+#ifdef CONFIG_PAX_REFCOUNT
9352+ "jno 0f\n"
9353+ "movq %0, %1\n"
9354+ "int $4\n0:\n"
9355+ _ASM_EXTABLE(0b, 0b)
9356+#endif
9357+
9358+ : "+r" (i), "+m" (v->counter)
9359+ : : "memory");
9360+ return i + __i;
9361+}
9362+
9363+/**
9364+ * atomic64_add_return_unchecked - add and return
9365+ * @i: integer value to add
9366+ * @v: pointer to type atomic64_unchecked_t
9367+ *
9368+ * Atomically adds @i to @v and returns @i + @v
9369+ */
9370+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9371+{
9372+ long __i = i;
9373+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
9374 : "+r" (i), "+m" (v->counter)
9375 : : "memory");
9376 return i + __i;
9377@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9378 }
9379
9380 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9381+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9382+{
9383+ return atomic64_add_return_unchecked(1, v);
9384+}
9385 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9386
9387 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9388@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9389 return cmpxchg(&v->counter, old, new);
9390 }
9391
9392+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9393+{
9394+ return cmpxchg(&v->counter, old, new);
9395+}
9396+
9397 static inline long atomic64_xchg(atomic64_t *v, long new)
9398 {
9399 return xchg(&v->counter, new);
9400 }
9401
9402+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9403+{
9404+ return xchg(&v->counter, new);
9405+}
9406+
9407 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9408 {
9409 return cmpxchg(&v->counter, old, new);
9410 }
9411
9412+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9413+{
9414+ return cmpxchg(&v->counter, old, new);
9415+}
9416+
9417 static inline long atomic_xchg(atomic_t *v, int new)
9418 {
9419 return xchg(&v->counter, new);
9420 }
9421
9422+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9423+{
9424+ return xchg(&v->counter, new);
9425+}
9426+
9427 /**
9428 * atomic_add_unless - add unless the number is a given value
9429 * @v: pointer of type atomic_t
9430@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9431 */
9432 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9433 {
9434- int c, old;
9435+ int c, old, new;
9436 c = atomic_read(v);
9437 for (;;) {
9438- if (unlikely(c == (u)))
9439+ if (unlikely(c == u))
9440 break;
9441- old = atomic_cmpxchg((v), c, c + (a));
9442+
9443+ asm volatile("addl %2,%0\n"
9444+
9445+#ifdef CONFIG_PAX_REFCOUNT
9446+ "jno 0f\n"
9447+ "subl %2,%0\n"
9448+ "int $4\n0:\n"
9449+ _ASM_EXTABLE(0b, 0b)
9450+#endif
9451+
9452+ : "=r" (new)
9453+ : "0" (c), "ir" (a));
9454+
9455+ old = atomic_cmpxchg(v, c, new);
9456 if (likely(old == c))
9457 break;
9458 c = old;
9459 }
9460- return c != (u);
9461+ return c != u;
9462 }
9463
9464 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9465@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9466 */
9467 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9468 {
9469- long c, old;
9470+ long c, old, new;
9471 c = atomic64_read(v);
9472 for (;;) {
9473- if (unlikely(c == (u)))
9474+ if (unlikely(c == u))
9475 break;
9476- old = atomic64_cmpxchg((v), c, c + (a));
9477+
9478+ asm volatile("addq %2,%0\n"
9479+
9480+#ifdef CONFIG_PAX_REFCOUNT
9481+ "jno 0f\n"
9482+ "subq %2,%0\n"
9483+ "int $4\n0:\n"
9484+ _ASM_EXTABLE(0b, 0b)
9485+#endif
9486+
9487+ : "=r" (new)
9488+ : "0" (c), "er" (a));
9489+
9490+ old = atomic64_cmpxchg(v, c, new);
9491 if (likely(old == c))
9492 break;
9493 c = old;
9494 }
9495- return c != (u);
9496+ return c != u;
9497 }
9498
9499 /**
9500diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9501index 02b47a6..d5c4b15 100644
9502--- a/arch/x86/include/asm/bitops.h
9503+++ b/arch/x86/include/asm/bitops.h
9504@@ -38,7 +38,7 @@
9505 * a mask operation on a byte.
9506 */
9507 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9508-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9509+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9510 #define CONST_MASK(nr) (1 << ((nr) & 7))
9511
9512 /**
9513diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9514index 7a10659..8bbf355 100644
9515--- a/arch/x86/include/asm/boot.h
9516+++ b/arch/x86/include/asm/boot.h
9517@@ -11,10 +11,15 @@
9518 #include <asm/pgtable_types.h>
9519
9520 /* Physical address where kernel should be loaded. */
9521-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9522+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9523 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9524 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9525
9526+#ifndef __ASSEMBLY__
9527+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9528+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9529+#endif
9530+
9531 /* Minimum kernel alignment, as a power of two */
9532 #ifdef CONFIG_X86_64
9533 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9534diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9535index 549860d..7d45f68 100644
9536--- a/arch/x86/include/asm/cache.h
9537+++ b/arch/x86/include/asm/cache.h
9538@@ -5,9 +5,10 @@
9539
9540 /* L1 cache line size */
9541 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9542-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9543+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9544
9545 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9546+#define __read_only __attribute__((__section__(".data.read_only")))
9547
9548 #ifdef CONFIG_X86_VSMP
9549 /* vSMP Internode cacheline shift */
9550diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9551index b54f6af..5b376a6 100644
9552--- a/arch/x86/include/asm/cacheflush.h
9553+++ b/arch/x86/include/asm/cacheflush.h
9554@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9555 static inline unsigned long get_page_memtype(struct page *pg)
9556 {
9557 if (!PageUncached(pg) && !PageWC(pg))
9558- return -1;
9559+ return ~0UL;
9560 else if (!PageUncached(pg) && PageWC(pg))
9561 return _PAGE_CACHE_WC;
9562 else if (PageUncached(pg) && !PageWC(pg))
9563@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9564 SetPageWC(pg);
9565 break;
9566 default:
9567- case -1:
9568+ case ~0UL:
9569 ClearPageUncached(pg);
9570 ClearPageWC(pg);
9571 break;
9572diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9573index 0e63c9a..ab8d972 100644
9574--- a/arch/x86/include/asm/calling.h
9575+++ b/arch/x86/include/asm/calling.h
9576@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9577 * for assembly code:
9578 */
9579
9580-#define R15 0
9581-#define R14 8
9582-#define R13 16
9583-#define R12 24
9584-#define RBP 32
9585-#define RBX 40
9586+#define R15 (0)
9587+#define R14 (8)
9588+#define R13 (16)
9589+#define R12 (24)
9590+#define RBP (32)
9591+#define RBX (40)
9592
9593 /* arguments: interrupts/non tracing syscalls only save up to here: */
9594-#define R11 48
9595-#define R10 56
9596-#define R9 64
9597-#define R8 72
9598-#define RAX 80
9599-#define RCX 88
9600-#define RDX 96
9601-#define RSI 104
9602-#define RDI 112
9603-#define ORIG_RAX 120 /* + error_code */
9604+#define R11 (48)
9605+#define R10 (56)
9606+#define R9 (64)
9607+#define R8 (72)
9608+#define RAX (80)
9609+#define RCX (88)
9610+#define RDX (96)
9611+#define RSI (104)
9612+#define RDI (112)
9613+#define ORIG_RAX (120) /* + error_code */
9614 /* end of arguments */
9615
9616 /* cpu exception frame or undefined in case of fast syscall: */
9617-#define RIP 128
9618-#define CS 136
9619-#define EFLAGS 144
9620-#define RSP 152
9621-#define SS 160
9622+#define RIP (128)
9623+#define CS (136)
9624+#define EFLAGS (144)
9625+#define RSP (152)
9626+#define SS (160)
9627
9628 #define ARGOFFSET R11
9629 #define SWFRAME ORIG_RAX
9630diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9631index 46fc474..b02b0f9 100644
9632--- a/arch/x86/include/asm/checksum_32.h
9633+++ b/arch/x86/include/asm/checksum_32.h
9634@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9635 int len, __wsum sum,
9636 int *src_err_ptr, int *dst_err_ptr);
9637
9638+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9639+ int len, __wsum sum,
9640+ int *src_err_ptr, int *dst_err_ptr);
9641+
9642+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9643+ int len, __wsum sum,
9644+ int *src_err_ptr, int *dst_err_ptr);
9645+
9646 /*
9647 * Note: when you get a NULL pointer exception here this means someone
9648 * passed in an incorrect kernel address to one of these functions.
9649@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9650 int *err_ptr)
9651 {
9652 might_sleep();
9653- return csum_partial_copy_generic((__force void *)src, dst,
9654+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9655 len, sum, err_ptr, NULL);
9656 }
9657
9658@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9659 {
9660 might_sleep();
9661 if (access_ok(VERIFY_WRITE, dst, len))
9662- return csum_partial_copy_generic(src, (__force void *)dst,
9663+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9664 len, sum, NULL, err_ptr);
9665
9666 if (len)
9667diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9668index 617bd56..7b047a1 100644
9669--- a/arch/x86/include/asm/desc.h
9670+++ b/arch/x86/include/asm/desc.h
9671@@ -4,6 +4,7 @@
9672 #include <asm/desc_defs.h>
9673 #include <asm/ldt.h>
9674 #include <asm/mmu.h>
9675+#include <asm/pgtable.h>
9676 #include <linux/smp.h>
9677
9678 static inline void fill_ldt(struct desc_struct *desc,
9679@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9680 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9681 desc->type = (info->read_exec_only ^ 1) << 1;
9682 desc->type |= info->contents << 2;
9683+ desc->type |= info->seg_not_present ^ 1;
9684 desc->s = 1;
9685 desc->dpl = 0x3;
9686 desc->p = info->seg_not_present ^ 1;
9687@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9688 }
9689
9690 extern struct desc_ptr idt_descr;
9691-extern gate_desc idt_table[];
9692-
9693-struct gdt_page {
9694- struct desc_struct gdt[GDT_ENTRIES];
9695-} __attribute__((aligned(PAGE_SIZE)));
9696-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9697+extern gate_desc idt_table[256];
9698
9699+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9700 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9701 {
9702- return per_cpu(gdt_page, cpu).gdt;
9703+ return cpu_gdt_table[cpu];
9704 }
9705
9706 #ifdef CONFIG_X86_64
9707@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9708 unsigned long base, unsigned dpl, unsigned flags,
9709 unsigned short seg)
9710 {
9711- gate->a = (seg << 16) | (base & 0xffff);
9712- gate->b = (base & 0xffff0000) |
9713- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9714+ gate->gate.offset_low = base;
9715+ gate->gate.seg = seg;
9716+ gate->gate.reserved = 0;
9717+ gate->gate.type = type;
9718+ gate->gate.s = 0;
9719+ gate->gate.dpl = dpl;
9720+ gate->gate.p = 1;
9721+ gate->gate.offset_high = base >> 16;
9722 }
9723
9724 #endif
9725@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9726 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9727 const gate_desc *gate)
9728 {
9729+ pax_open_kernel();
9730 memcpy(&idt[entry], gate, sizeof(*gate));
9731+ pax_close_kernel();
9732 }
9733
9734 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9735 const void *desc)
9736 {
9737+ pax_open_kernel();
9738 memcpy(&ldt[entry], desc, 8);
9739+ pax_close_kernel();
9740 }
9741
9742 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9743@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9744 size = sizeof(struct desc_struct);
9745 break;
9746 }
9747+
9748+ pax_open_kernel();
9749 memcpy(&gdt[entry], desc, size);
9750+ pax_close_kernel();
9751 }
9752
9753 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9754@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9755
9756 static inline void native_load_tr_desc(void)
9757 {
9758+ pax_open_kernel();
9759 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9760+ pax_close_kernel();
9761 }
9762
9763 static inline void native_load_gdt(const struct desc_ptr *dtr)
9764@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9765 unsigned int i;
9766 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9767
9768+ pax_open_kernel();
9769 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9770 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9771+ pax_close_kernel();
9772 }
9773
9774 #define _LDT_empty(info) \
9775@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9776 desc->limit = (limit >> 16) & 0xf;
9777 }
9778
9779-static inline void _set_gate(int gate, unsigned type, void *addr,
9780+static inline void _set_gate(int gate, unsigned type, const void *addr,
9781 unsigned dpl, unsigned ist, unsigned seg)
9782 {
9783 gate_desc s;
9784@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9785 * Pentium F0 0F bugfix can have resulted in the mapped
9786 * IDT being write-protected.
9787 */
9788-static inline void set_intr_gate(unsigned int n, void *addr)
9789+static inline void set_intr_gate(unsigned int n, const void *addr)
9790 {
9791 BUG_ON((unsigned)n > 0xFF);
9792 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9793@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9794 /*
9795 * This routine sets up an interrupt gate at directory privilege level 3.
9796 */
9797-static inline void set_system_intr_gate(unsigned int n, void *addr)
9798+static inline void set_system_intr_gate(unsigned int n, const void *addr)
9799 {
9800 BUG_ON((unsigned)n > 0xFF);
9801 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9802 }
9803
9804-static inline void set_system_trap_gate(unsigned int n, void *addr)
9805+static inline void set_system_trap_gate(unsigned int n, const void *addr)
9806 {
9807 BUG_ON((unsigned)n > 0xFF);
9808 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9809 }
9810
9811-static inline void set_trap_gate(unsigned int n, void *addr)
9812+static inline void set_trap_gate(unsigned int n, const void *addr)
9813 {
9814 BUG_ON((unsigned)n > 0xFF);
9815 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9816@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9817 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9818 {
9819 BUG_ON((unsigned)n > 0xFF);
9820- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9821+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9822 }
9823
9824-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9825+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9826 {
9827 BUG_ON((unsigned)n > 0xFF);
9828 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9829 }
9830
9831-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9832+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9833 {
9834 BUG_ON((unsigned)n > 0xFF);
9835 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9836 }
9837
9838+#ifdef CONFIG_X86_32
9839+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9840+{
9841+ struct desc_struct d;
9842+
9843+ if (likely(limit))
9844+ limit = (limit - 1UL) >> PAGE_SHIFT;
9845+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
9846+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9847+}
9848+#endif
9849+
9850 #endif /* _ASM_X86_DESC_H */
9851diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9852index 9d66848..6b4a691 100644
9853--- a/arch/x86/include/asm/desc_defs.h
9854+++ b/arch/x86/include/asm/desc_defs.h
9855@@ -31,6 +31,12 @@ struct desc_struct {
9856 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9857 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9858 };
9859+ struct {
9860+ u16 offset_low;
9861+ u16 seg;
9862+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9863+ unsigned offset_high: 16;
9864+ } gate;
9865 };
9866 } __attribute__((packed));
9867
9868diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9869index cee34e9..a7c3fa2 100644
9870--- a/arch/x86/include/asm/device.h
9871+++ b/arch/x86/include/asm/device.h
9872@@ -6,7 +6,7 @@ struct dev_archdata {
9873 void *acpi_handle;
9874 #endif
9875 #ifdef CONFIG_X86_64
9876-struct dma_map_ops *dma_ops;
9877+ const struct dma_map_ops *dma_ops;
9878 #endif
9879 #ifdef CONFIG_DMAR
9880 void *iommu; /* hook for IOMMU specific extension */
9881diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9882index 6a25d5d..786b202 100644
9883--- a/arch/x86/include/asm/dma-mapping.h
9884+++ b/arch/x86/include/asm/dma-mapping.h
9885@@ -25,9 +25,9 @@ extern int iommu_merge;
9886 extern struct device x86_dma_fallback_dev;
9887 extern int panic_on_overflow;
9888
9889-extern struct dma_map_ops *dma_ops;
9890+extern const struct dma_map_ops *dma_ops;
9891
9892-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9893+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9894 {
9895 #ifdef CONFIG_X86_32
9896 return dma_ops;
9897@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9898 /* Make sure we keep the same behaviour */
9899 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9900 {
9901- struct dma_map_ops *ops = get_dma_ops(dev);
9902+ const struct dma_map_ops *ops = get_dma_ops(dev);
9903 if (ops->mapping_error)
9904 return ops->mapping_error(dev, dma_addr);
9905
9906@@ -122,7 +122,7 @@ static inline void *
9907 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9908 gfp_t gfp)
9909 {
9910- struct dma_map_ops *ops = get_dma_ops(dev);
9911+ const struct dma_map_ops *ops = get_dma_ops(dev);
9912 void *memory;
9913
9914 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9915@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9916 static inline void dma_free_coherent(struct device *dev, size_t size,
9917 void *vaddr, dma_addr_t bus)
9918 {
9919- struct dma_map_ops *ops = get_dma_ops(dev);
9920+ const struct dma_map_ops *ops = get_dma_ops(dev);
9921
9922 WARN_ON(irqs_disabled()); /* for portability */
9923
9924diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9925index 40b4e61..40d8133 100644
9926--- a/arch/x86/include/asm/e820.h
9927+++ b/arch/x86/include/asm/e820.h
9928@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9929 #define ISA_END_ADDRESS 0x100000
9930 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9931
9932-#define BIOS_BEGIN 0x000a0000
9933+#define BIOS_BEGIN 0x000c0000
9934 #define BIOS_END 0x00100000
9935
9936 #ifdef __KERNEL__
9937diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9938index 8ac9d9a..0a6c96e 100644
9939--- a/arch/x86/include/asm/elf.h
9940+++ b/arch/x86/include/asm/elf.h
9941@@ -257,7 +257,25 @@ extern int force_personality32;
9942 the loader. We need to make sure that it is out of the way of the program
9943 that it will "exec", and that there is sufficient room for the brk. */
9944
9945+#ifdef CONFIG_PAX_SEGMEXEC
9946+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9947+#else
9948 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9949+#endif
9950+
9951+#ifdef CONFIG_PAX_ASLR
9952+#ifdef CONFIG_X86_32
9953+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9954+
9955+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9956+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9957+#else
9958+#define PAX_ELF_ET_DYN_BASE 0x400000UL
9959+
9960+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9961+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9962+#endif
9963+#endif
9964
9965 /* This yields a mask that user programs can use to figure out what
9966 instruction set this CPU supports. This could be done in user space,
9967@@ -310,9 +328,7 @@ do { \
9968
9969 #define ARCH_DLINFO \
9970 do { \
9971- if (vdso_enabled) \
9972- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9973- (unsigned long)current->mm->context.vdso); \
9974+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9975 } while (0)
9976
9977 #define AT_SYSINFO 32
9978@@ -323,7 +339,7 @@ do { \
9979
9980 #endif /* !CONFIG_X86_32 */
9981
9982-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9983+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9984
9985 #define VDSO_ENTRY \
9986 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9987@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9988 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9989 #define compat_arch_setup_additional_pages syscall32_setup_pages
9990
9991-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9992-#define arch_randomize_brk arch_randomize_brk
9993-
9994 #endif /* _ASM_X86_ELF_H */
9995diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
9996index cc70c1c..d96d011 100644
9997--- a/arch/x86/include/asm/emergency-restart.h
9998+++ b/arch/x86/include/asm/emergency-restart.h
9999@@ -15,6 +15,6 @@ enum reboot_type {
10000
10001 extern enum reboot_type reboot_type;
10002
10003-extern void machine_emergency_restart(void);
10004+extern void machine_emergency_restart(void) __noreturn;
10005
10006 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10007diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10008index 1f11ce4..7caabd1 100644
10009--- a/arch/x86/include/asm/futex.h
10010+++ b/arch/x86/include/asm/futex.h
10011@@ -12,16 +12,18 @@
10012 #include <asm/system.h>
10013
10014 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10015+ typecheck(u32 __user *, uaddr); \
10016 asm volatile("1:\t" insn "\n" \
10017 "2:\t.section .fixup,\"ax\"\n" \
10018 "3:\tmov\t%3, %1\n" \
10019 "\tjmp\t2b\n" \
10020 "\t.previous\n" \
10021 _ASM_EXTABLE(1b, 3b) \
10022- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10023+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10024 : "i" (-EFAULT), "0" (oparg), "1" (0))
10025
10026 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10027+ typecheck(u32 __user *, uaddr); \
10028 asm volatile("1:\tmovl %2, %0\n" \
10029 "\tmovl\t%0, %3\n" \
10030 "\t" insn "\n" \
10031@@ -34,10 +36,10 @@
10032 _ASM_EXTABLE(1b, 4b) \
10033 _ASM_EXTABLE(2b, 4b) \
10034 : "=&a" (oldval), "=&r" (ret), \
10035- "+m" (*uaddr), "=&r" (tem) \
10036+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10037 : "r" (oparg), "i" (-EFAULT), "1" (0))
10038
10039-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10040+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10041 {
10042 int op = (encoded_op >> 28) & 7;
10043 int cmp = (encoded_op >> 24) & 15;
10044@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10045
10046 switch (op) {
10047 case FUTEX_OP_SET:
10048- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10049+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10050 break;
10051 case FUTEX_OP_ADD:
10052- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10053+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10054 uaddr, oparg);
10055 break;
10056 case FUTEX_OP_OR:
10057@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10058 return ret;
10059 }
10060
10061-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10062+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10063 int newval)
10064 {
10065
10066@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10067 return -ENOSYS;
10068 #endif
10069
10070- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10071+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10072 return -EFAULT;
10073
10074- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10075+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10076 "2:\t.section .fixup, \"ax\"\n"
10077 "3:\tmov %2, %0\n"
10078 "\tjmp 2b\n"
10079 "\t.previous\n"
10080 _ASM_EXTABLE(1b, 3b)
10081- : "=a" (oldval), "+m" (*uaddr)
10082+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10083 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10084 : "memory"
10085 );
10086diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10087index ba180d9..3bad351 100644
10088--- a/arch/x86/include/asm/hw_irq.h
10089+++ b/arch/x86/include/asm/hw_irq.h
10090@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10091 extern void enable_IO_APIC(void);
10092
10093 /* Statistics */
10094-extern atomic_t irq_err_count;
10095-extern atomic_t irq_mis_count;
10096+extern atomic_unchecked_t irq_err_count;
10097+extern atomic_unchecked_t irq_mis_count;
10098
10099 /* EISA */
10100 extern void eisa_set_level_irq(unsigned int irq);
10101diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10102index 0b20bbb..4cb1396 100644
10103--- a/arch/x86/include/asm/i387.h
10104+++ b/arch/x86/include/asm/i387.h
10105@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10106 {
10107 int err;
10108
10109+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10110+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10111+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10112+#endif
10113+
10114 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10115 "2:\n"
10116 ".section .fixup,\"ax\"\n"
10117@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10118 {
10119 int err;
10120
10121+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10122+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10123+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10124+#endif
10125+
10126 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10127 "2:\n"
10128 ".section .fixup,\"ax\"\n"
10129@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10130 }
10131
10132 /* We need a safe address that is cheap to find and that is already
10133- in L1 during context switch. The best choices are unfortunately
10134- different for UP and SMP */
10135-#ifdef CONFIG_SMP
10136-#define safe_address (__per_cpu_offset[0])
10137-#else
10138-#define safe_address (kstat_cpu(0).cpustat.user)
10139-#endif
10140+ in L1 during context switch. */
10141+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10142
10143 /*
10144 * These must be called with preempt disabled
10145@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10146 struct thread_info *me = current_thread_info();
10147 preempt_disable();
10148 if (me->status & TS_USEDFPU)
10149- __save_init_fpu(me->task);
10150+ __save_init_fpu(current);
10151 else
10152 clts();
10153 }
10154diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10155index a299900..15c5410 100644
10156--- a/arch/x86/include/asm/io_32.h
10157+++ b/arch/x86/include/asm/io_32.h
10158@@ -3,6 +3,7 @@
10159
10160 #include <linux/string.h>
10161 #include <linux/compiler.h>
10162+#include <asm/processor.h>
10163
10164 /*
10165 * This file contains the definitions for the x86 IO instructions
10166@@ -42,6 +43,17 @@
10167
10168 #ifdef __KERNEL__
10169
10170+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10171+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10172+{
10173+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10174+}
10175+
10176+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10177+{
10178+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10179+}
10180+
10181 #include <asm-generic/iomap.h>
10182
10183 #include <linux/vmalloc.h>
10184diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10185index 2440678..c158b88 100644
10186--- a/arch/x86/include/asm/io_64.h
10187+++ b/arch/x86/include/asm/io_64.h
10188@@ -140,6 +140,17 @@ __OUTS(l)
10189
10190 #include <linux/vmalloc.h>
10191
10192+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10193+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10194+{
10195+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10196+}
10197+
10198+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10199+{
10200+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10201+}
10202+
10203 #include <asm-generic/iomap.h>
10204
10205 void __memcpy_fromio(void *, unsigned long, unsigned);
10206diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10207index fd6d21b..8b13915 100644
10208--- a/arch/x86/include/asm/iommu.h
10209+++ b/arch/x86/include/asm/iommu.h
10210@@ -3,7 +3,7 @@
10211
10212 extern void pci_iommu_shutdown(void);
10213 extern void no_iommu_init(void);
10214-extern struct dma_map_ops nommu_dma_ops;
10215+extern const struct dma_map_ops nommu_dma_ops;
10216 extern int force_iommu, no_iommu;
10217 extern int iommu_detected;
10218 extern int iommu_pass_through;
10219diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10220index 9e2b952..557206e 100644
10221--- a/arch/x86/include/asm/irqflags.h
10222+++ b/arch/x86/include/asm/irqflags.h
10223@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10224 sti; \
10225 sysexit
10226
10227+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10228+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10229+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10230+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10231+
10232 #else
10233 #define INTERRUPT_RETURN iret
10234 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10235diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10236index 4fe681d..bb6d40c 100644
10237--- a/arch/x86/include/asm/kprobes.h
10238+++ b/arch/x86/include/asm/kprobes.h
10239@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10240 #define BREAKPOINT_INSTRUCTION 0xcc
10241 #define RELATIVEJUMP_INSTRUCTION 0xe9
10242 #define MAX_INSN_SIZE 16
10243-#define MAX_STACK_SIZE 64
10244-#define MIN_STACK_SIZE(ADDR) \
10245- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10246- THREAD_SIZE - (unsigned long)(ADDR))) \
10247- ? (MAX_STACK_SIZE) \
10248- : (((unsigned long)current_thread_info()) + \
10249- THREAD_SIZE - (unsigned long)(ADDR)))
10250+#define MAX_STACK_SIZE 64UL
10251+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10252
10253 #define flush_insn_slot(p) do { } while (0)
10254
10255diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10256index 08bc2ff..2e88d1f 100644
10257--- a/arch/x86/include/asm/kvm_host.h
10258+++ b/arch/x86/include/asm/kvm_host.h
10259@@ -534,9 +534,9 @@ struct kvm_x86_ops {
10260 bool (*gb_page_enable)(void);
10261
10262 const struct trace_print_flags *exit_reasons_str;
10263-};
10264+} __do_const;
10265
10266-extern struct kvm_x86_ops *kvm_x86_ops;
10267+extern const struct kvm_x86_ops *kvm_x86_ops;
10268
10269 int kvm_mmu_module_init(void);
10270 void kvm_mmu_module_exit(void);
10271diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10272index 47b9b6f..815aaa1 100644
10273--- a/arch/x86/include/asm/local.h
10274+++ b/arch/x86/include/asm/local.h
10275@@ -18,26 +18,58 @@ typedef struct {
10276
10277 static inline void local_inc(local_t *l)
10278 {
10279- asm volatile(_ASM_INC "%0"
10280+ asm volatile(_ASM_INC "%0\n"
10281+
10282+#ifdef CONFIG_PAX_REFCOUNT
10283+ "jno 0f\n"
10284+ _ASM_DEC "%0\n"
10285+ "int $4\n0:\n"
10286+ _ASM_EXTABLE(0b, 0b)
10287+#endif
10288+
10289 : "+m" (l->a.counter));
10290 }
10291
10292 static inline void local_dec(local_t *l)
10293 {
10294- asm volatile(_ASM_DEC "%0"
10295+ asm volatile(_ASM_DEC "%0\n"
10296+
10297+#ifdef CONFIG_PAX_REFCOUNT
10298+ "jno 0f\n"
10299+ _ASM_INC "%0\n"
10300+ "int $4\n0:\n"
10301+ _ASM_EXTABLE(0b, 0b)
10302+#endif
10303+
10304 : "+m" (l->a.counter));
10305 }
10306
10307 static inline void local_add(long i, local_t *l)
10308 {
10309- asm volatile(_ASM_ADD "%1,%0"
10310+ asm volatile(_ASM_ADD "%1,%0\n"
10311+
10312+#ifdef CONFIG_PAX_REFCOUNT
10313+ "jno 0f\n"
10314+ _ASM_SUB "%1,%0\n"
10315+ "int $4\n0:\n"
10316+ _ASM_EXTABLE(0b, 0b)
10317+#endif
10318+
10319 : "+m" (l->a.counter)
10320 : "ir" (i));
10321 }
10322
10323 static inline void local_sub(long i, local_t *l)
10324 {
10325- asm volatile(_ASM_SUB "%1,%0"
10326+ asm volatile(_ASM_SUB "%1,%0\n"
10327+
10328+#ifdef CONFIG_PAX_REFCOUNT
10329+ "jno 0f\n"
10330+ _ASM_ADD "%1,%0\n"
10331+ "int $4\n0:\n"
10332+ _ASM_EXTABLE(0b, 0b)
10333+#endif
10334+
10335 : "+m" (l->a.counter)
10336 : "ir" (i));
10337 }
10338@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10339 {
10340 unsigned char c;
10341
10342- asm volatile(_ASM_SUB "%2,%0; sete %1"
10343+ asm volatile(_ASM_SUB "%2,%0\n"
10344+
10345+#ifdef CONFIG_PAX_REFCOUNT
10346+ "jno 0f\n"
10347+ _ASM_ADD "%2,%0\n"
10348+ "int $4\n0:\n"
10349+ _ASM_EXTABLE(0b, 0b)
10350+#endif
10351+
10352+ "sete %1\n"
10353 : "+m" (l->a.counter), "=qm" (c)
10354 : "ir" (i) : "memory");
10355 return c;
10356@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10357 {
10358 unsigned char c;
10359
10360- asm volatile(_ASM_DEC "%0; sete %1"
10361+ asm volatile(_ASM_DEC "%0\n"
10362+
10363+#ifdef CONFIG_PAX_REFCOUNT
10364+ "jno 0f\n"
10365+ _ASM_INC "%0\n"
10366+ "int $4\n0:\n"
10367+ _ASM_EXTABLE(0b, 0b)
10368+#endif
10369+
10370+ "sete %1\n"
10371 : "+m" (l->a.counter), "=qm" (c)
10372 : : "memory");
10373 return c != 0;
10374@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10375 {
10376 unsigned char c;
10377
10378- asm volatile(_ASM_INC "%0; sete %1"
10379+ asm volatile(_ASM_INC "%0\n"
10380+
10381+#ifdef CONFIG_PAX_REFCOUNT
10382+ "jno 0f\n"
10383+ _ASM_DEC "%0\n"
10384+ "int $4\n0:\n"
10385+ _ASM_EXTABLE(0b, 0b)
10386+#endif
10387+
10388+ "sete %1\n"
10389 : "+m" (l->a.counter), "=qm" (c)
10390 : : "memory");
10391 return c != 0;
10392@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10393 {
10394 unsigned char c;
10395
10396- asm volatile(_ASM_ADD "%2,%0; sets %1"
10397+ asm volatile(_ASM_ADD "%2,%0\n"
10398+
10399+#ifdef CONFIG_PAX_REFCOUNT
10400+ "jno 0f\n"
10401+ _ASM_SUB "%2,%0\n"
10402+ "int $4\n0:\n"
10403+ _ASM_EXTABLE(0b, 0b)
10404+#endif
10405+
10406+ "sets %1\n"
10407 : "+m" (l->a.counter), "=qm" (c)
10408 : "ir" (i) : "memory");
10409 return c;
10410@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10411 #endif
10412 /* Modern 486+ processor */
10413 __i = i;
10414- asm volatile(_ASM_XADD "%0, %1;"
10415+ asm volatile(_ASM_XADD "%0, %1\n"
10416+
10417+#ifdef CONFIG_PAX_REFCOUNT
10418+ "jno 0f\n"
10419+ _ASM_MOV "%0,%1\n"
10420+ "int $4\n0:\n"
10421+ _ASM_EXTABLE(0b, 0b)
10422+#endif
10423+
10424 : "+r" (i), "+m" (l->a.counter)
10425 : : "memory");
10426 return i + __i;
10427diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10428index ef51b50..514ba37 100644
10429--- a/arch/x86/include/asm/microcode.h
10430+++ b/arch/x86/include/asm/microcode.h
10431@@ -12,13 +12,13 @@ struct device;
10432 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10433
10434 struct microcode_ops {
10435- enum ucode_state (*request_microcode_user) (int cpu,
10436+ enum ucode_state (* const request_microcode_user) (int cpu,
10437 const void __user *buf, size_t size);
10438
10439- enum ucode_state (*request_microcode_fw) (int cpu,
10440+ enum ucode_state (* const request_microcode_fw) (int cpu,
10441 struct device *device);
10442
10443- void (*microcode_fini_cpu) (int cpu);
10444+ void (* const microcode_fini_cpu) (int cpu);
10445
10446 /*
10447 * The generic 'microcode_core' part guarantees that
10448@@ -38,18 +38,18 @@ struct ucode_cpu_info {
10449 extern struct ucode_cpu_info ucode_cpu_info[];
10450
10451 #ifdef CONFIG_MICROCODE_INTEL
10452-extern struct microcode_ops * __init init_intel_microcode(void);
10453+extern const struct microcode_ops * __init init_intel_microcode(void);
10454 #else
10455-static inline struct microcode_ops * __init init_intel_microcode(void)
10456+static inline const struct microcode_ops * __init init_intel_microcode(void)
10457 {
10458 return NULL;
10459 }
10460 #endif /* CONFIG_MICROCODE_INTEL */
10461
10462 #ifdef CONFIG_MICROCODE_AMD
10463-extern struct microcode_ops * __init init_amd_microcode(void);
10464+extern const struct microcode_ops * __init init_amd_microcode(void);
10465 #else
10466-static inline struct microcode_ops * __init init_amd_microcode(void)
10467+static inline const struct microcode_ops * __init init_amd_microcode(void)
10468 {
10469 return NULL;
10470 }
10471diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10472index 593e51d..fa69c9a 100644
10473--- a/arch/x86/include/asm/mman.h
10474+++ b/arch/x86/include/asm/mman.h
10475@@ -5,4 +5,14 @@
10476
10477 #include <asm-generic/mman.h>
10478
10479+#ifdef __KERNEL__
10480+#ifndef __ASSEMBLY__
10481+#ifdef CONFIG_X86_32
10482+#define arch_mmap_check i386_mmap_check
10483+int i386_mmap_check(unsigned long addr, unsigned long len,
10484+ unsigned long flags);
10485+#endif
10486+#endif
10487+#endif
10488+
10489 #endif /* _ASM_X86_MMAN_H */
10490diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10491index 80a1dee..239c67d 100644
10492--- a/arch/x86/include/asm/mmu.h
10493+++ b/arch/x86/include/asm/mmu.h
10494@@ -9,10 +9,23 @@
10495 * we put the segment information here.
10496 */
10497 typedef struct {
10498- void *ldt;
10499+ struct desc_struct *ldt;
10500 int size;
10501 struct mutex lock;
10502- void *vdso;
10503+ unsigned long vdso;
10504+
10505+#ifdef CONFIG_X86_32
10506+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10507+ unsigned long user_cs_base;
10508+ unsigned long user_cs_limit;
10509+
10510+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10511+ cpumask_t cpu_user_cs_mask;
10512+#endif
10513+
10514+#endif
10515+#endif
10516+
10517 } mm_context_t;
10518
10519 #ifdef CONFIG_SMP
10520diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10521index 8b5393e..8143173 100644
10522--- a/arch/x86/include/asm/mmu_context.h
10523+++ b/arch/x86/include/asm/mmu_context.h
10524@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10525
10526 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10527 {
10528+
10529+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10530+ unsigned int i;
10531+ pgd_t *pgd;
10532+
10533+ pax_open_kernel();
10534+ pgd = get_cpu_pgd(smp_processor_id());
10535+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10536+ set_pgd_batched(pgd+i, native_make_pgd(0));
10537+ pax_close_kernel();
10538+#endif
10539+
10540 #ifdef CONFIG_SMP
10541 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10542 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10543@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10544 struct task_struct *tsk)
10545 {
10546 unsigned cpu = smp_processor_id();
10547+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10548+ int tlbstate = TLBSTATE_OK;
10549+#endif
10550
10551 if (likely(prev != next)) {
10552 #ifdef CONFIG_SMP
10553+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10554+ tlbstate = percpu_read(cpu_tlbstate.state);
10555+#endif
10556 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10557 percpu_write(cpu_tlbstate.active_mm, next);
10558 #endif
10559 cpumask_set_cpu(cpu, mm_cpumask(next));
10560
10561 /* Re-load page tables */
10562+#ifdef CONFIG_PAX_PER_CPU_PGD
10563+ pax_open_kernel();
10564+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10565+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10566+ pax_close_kernel();
10567+ load_cr3(get_cpu_pgd(cpu));
10568+#else
10569 load_cr3(next->pgd);
10570+#endif
10571
10572 /* stop flush ipis for the previous mm */
10573 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10574@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10575 */
10576 if (unlikely(prev->context.ldt != next->context.ldt))
10577 load_LDT_nolock(&next->context);
10578- }
10579+
10580+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10581+ if (!nx_enabled) {
10582+ smp_mb__before_clear_bit();
10583+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10584+ smp_mb__after_clear_bit();
10585+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10586+ }
10587+#endif
10588+
10589+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10590+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10591+ prev->context.user_cs_limit != next->context.user_cs_limit))
10592+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10593 #ifdef CONFIG_SMP
10594+ else if (unlikely(tlbstate != TLBSTATE_OK))
10595+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10596+#endif
10597+#endif
10598+
10599+ }
10600 else {
10601+
10602+#ifdef CONFIG_PAX_PER_CPU_PGD
10603+ pax_open_kernel();
10604+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10605+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10606+ pax_close_kernel();
10607+ load_cr3(get_cpu_pgd(cpu));
10608+#endif
10609+
10610+#ifdef CONFIG_SMP
10611 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10612 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10613
10614@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10615 * tlb flush IPI delivery. We must reload CR3
10616 * to make sure to use no freed page tables.
10617 */
10618+
10619+#ifndef CONFIG_PAX_PER_CPU_PGD
10620 load_cr3(next->pgd);
10621+#endif
10622+
10623 load_LDT_nolock(&next->context);
10624+
10625+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10626+ if (!nx_enabled)
10627+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10628+#endif
10629+
10630+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10631+#ifdef CONFIG_PAX_PAGEEXEC
10632+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10633+#endif
10634+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10635+#endif
10636+
10637 }
10638- }
10639 #endif
10640+ }
10641 }
10642
10643 #define activate_mm(prev, next) \
10644diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10645index 3e2ce58..caaf478 100644
10646--- a/arch/x86/include/asm/module.h
10647+++ b/arch/x86/include/asm/module.h
10648@@ -5,6 +5,7 @@
10649
10650 #ifdef CONFIG_X86_64
10651 /* X86_64 does not define MODULE_PROC_FAMILY */
10652+#define MODULE_PROC_FAMILY ""
10653 #elif defined CONFIG_M386
10654 #define MODULE_PROC_FAMILY "386 "
10655 #elif defined CONFIG_M486
10656@@ -59,13 +60,26 @@
10657 #error unknown processor family
10658 #endif
10659
10660-#ifdef CONFIG_X86_32
10661-# ifdef CONFIG_4KSTACKS
10662-# define MODULE_STACKSIZE "4KSTACKS "
10663-# else
10664-# define MODULE_STACKSIZE ""
10665-# endif
10666-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10667+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10668+#define MODULE_STACKSIZE "4KSTACKS "
10669+#else
10670+#define MODULE_STACKSIZE ""
10671+#endif
10672+
10673+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10674+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10675+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10676+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10677+#else
10678+#define MODULE_PAX_KERNEXEC ""
10679 #endif
10680
10681+#ifdef CONFIG_PAX_MEMORY_UDEREF
10682+#define MODULE_PAX_UDEREF "UDEREF "
10683+#else
10684+#define MODULE_PAX_UDEREF ""
10685+#endif
10686+
10687+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10688+
10689 #endif /* _ASM_X86_MODULE_H */
10690diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10691index 7639dbf..e08a58c 100644
10692--- a/arch/x86/include/asm/page_64_types.h
10693+++ b/arch/x86/include/asm/page_64_types.h
10694@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10695
10696 /* duplicated to the one in bootmem.h */
10697 extern unsigned long max_pfn;
10698-extern unsigned long phys_base;
10699+extern const unsigned long phys_base;
10700
10701 extern unsigned long __phys_addr(unsigned long);
10702 #define __phys_reloc_hide(x) (x)
10703diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10704index efb3899..ef30687 100644
10705--- a/arch/x86/include/asm/paravirt.h
10706+++ b/arch/x86/include/asm/paravirt.h
10707@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10708 val);
10709 }
10710
10711+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10712+{
10713+ pgdval_t val = native_pgd_val(pgd);
10714+
10715+ if (sizeof(pgdval_t) > sizeof(long))
10716+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10717+ val, (u64)val >> 32);
10718+ else
10719+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10720+ val);
10721+}
10722+
10723 static inline void pgd_clear(pgd_t *pgdp)
10724 {
10725 set_pgd(pgdp, __pgd(0));
10726@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10727 pv_mmu_ops.set_fixmap(idx, phys, flags);
10728 }
10729
10730+#ifdef CONFIG_PAX_KERNEXEC
10731+static inline unsigned long pax_open_kernel(void)
10732+{
10733+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10734+}
10735+
10736+static inline unsigned long pax_close_kernel(void)
10737+{
10738+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10739+}
10740+#else
10741+static inline unsigned long pax_open_kernel(void) { return 0; }
10742+static inline unsigned long pax_close_kernel(void) { return 0; }
10743+#endif
10744+
10745 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10746
10747 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10748@@ -945,7 +972,7 @@ extern void default_banner(void);
10749
10750 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10751 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10752-#define PARA_INDIRECT(addr) *%cs:addr
10753+#define PARA_INDIRECT(addr) *%ss:addr
10754 #endif
10755
10756 #define INTERRUPT_RETURN \
10757@@ -1022,6 +1049,21 @@ extern void default_banner(void);
10758 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10759 CLBR_NONE, \
10760 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10761+
10762+#define GET_CR0_INTO_RDI \
10763+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10764+ mov %rax,%rdi
10765+
10766+#define SET_RDI_INTO_CR0 \
10767+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10768+
10769+#define GET_CR3_INTO_RDI \
10770+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10771+ mov %rax,%rdi
10772+
10773+#define SET_RDI_INTO_CR3 \
10774+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10775+
10776 #endif /* CONFIG_X86_32 */
10777
10778 #endif /* __ASSEMBLY__ */
10779diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10780index 9357473..aeb2de5 100644
10781--- a/arch/x86/include/asm/paravirt_types.h
10782+++ b/arch/x86/include/asm/paravirt_types.h
10783@@ -78,19 +78,19 @@ struct pv_init_ops {
10784 */
10785 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10786 unsigned long addr, unsigned len);
10787-};
10788+} __no_const;
10789
10790
10791 struct pv_lazy_ops {
10792 /* Set deferred update mode, used for batching operations. */
10793 void (*enter)(void);
10794 void (*leave)(void);
10795-};
10796+} __no_const;
10797
10798 struct pv_time_ops {
10799 unsigned long long (*sched_clock)(void);
10800 unsigned long (*get_tsc_khz)(void);
10801-};
10802+} __no_const;
10803
10804 struct pv_cpu_ops {
10805 /* hooks for various privileged instructions */
10806@@ -186,7 +186,7 @@ struct pv_cpu_ops {
10807
10808 void (*start_context_switch)(struct task_struct *prev);
10809 void (*end_context_switch)(struct task_struct *next);
10810-};
10811+} __no_const;
10812
10813 struct pv_irq_ops {
10814 /*
10815@@ -217,7 +217,7 @@ struct pv_apic_ops {
10816 unsigned long start_eip,
10817 unsigned long start_esp);
10818 #endif
10819-};
10820+} __no_const;
10821
10822 struct pv_mmu_ops {
10823 unsigned long (*read_cr2)(void);
10824@@ -301,6 +301,7 @@ struct pv_mmu_ops {
10825 struct paravirt_callee_save make_pud;
10826
10827 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10828+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10829 #endif /* PAGETABLE_LEVELS == 4 */
10830 #endif /* PAGETABLE_LEVELS >= 3 */
10831
10832@@ -316,6 +317,12 @@ struct pv_mmu_ops {
10833 an mfn. We can tell which is which from the index. */
10834 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10835 phys_addr_t phys, pgprot_t flags);
10836+
10837+#ifdef CONFIG_PAX_KERNEXEC
10838+ unsigned long (*pax_open_kernel)(void);
10839+ unsigned long (*pax_close_kernel)(void);
10840+#endif
10841+
10842 };
10843
10844 struct raw_spinlock;
10845@@ -326,7 +333,7 @@ struct pv_lock_ops {
10846 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10847 int (*spin_trylock)(struct raw_spinlock *lock);
10848 void (*spin_unlock)(struct raw_spinlock *lock);
10849-};
10850+} __no_const;
10851
10852 /* This contains all the paravirt structures: we get a convenient
10853 * number for each function using the offset which we use to indicate
10854diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10855index b399988..3f47c38 100644
10856--- a/arch/x86/include/asm/pci_x86.h
10857+++ b/arch/x86/include/asm/pci_x86.h
10858@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10859 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10860
10861 struct pci_raw_ops {
10862- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10863+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10864 int reg, int len, u32 *val);
10865- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10866+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10867 int reg, int len, u32 val);
10868 };
10869
10870-extern struct pci_raw_ops *raw_pci_ops;
10871-extern struct pci_raw_ops *raw_pci_ext_ops;
10872+extern const struct pci_raw_ops *raw_pci_ops;
10873+extern const struct pci_raw_ops *raw_pci_ext_ops;
10874
10875-extern struct pci_raw_ops pci_direct_conf1;
10876+extern const struct pci_raw_ops pci_direct_conf1;
10877 extern bool port_cf9_safe;
10878
10879 /* arch_initcall level */
10880diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10881index b65a36d..50345a4 100644
10882--- a/arch/x86/include/asm/percpu.h
10883+++ b/arch/x86/include/asm/percpu.h
10884@@ -78,6 +78,7 @@ do { \
10885 if (0) { \
10886 T__ tmp__; \
10887 tmp__ = (val); \
10888+ (void)tmp__; \
10889 } \
10890 switch (sizeof(var)) { \
10891 case 1: \
10892diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10893index 271de94..ef944d6 100644
10894--- a/arch/x86/include/asm/pgalloc.h
10895+++ b/arch/x86/include/asm/pgalloc.h
10896@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10897 pmd_t *pmd, pte_t *pte)
10898 {
10899 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10900+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10901+}
10902+
10903+static inline void pmd_populate_user(struct mm_struct *mm,
10904+ pmd_t *pmd, pte_t *pte)
10905+{
10906+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10907 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10908 }
10909
10910diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10911index 2334982..70bc412 100644
10912--- a/arch/x86/include/asm/pgtable-2level.h
10913+++ b/arch/x86/include/asm/pgtable-2level.h
10914@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10915
10916 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10917 {
10918+ pax_open_kernel();
10919 *pmdp = pmd;
10920+ pax_close_kernel();
10921 }
10922
10923 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10924diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10925index 33927d2..ccde329 100644
10926--- a/arch/x86/include/asm/pgtable-3level.h
10927+++ b/arch/x86/include/asm/pgtable-3level.h
10928@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10929
10930 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10931 {
10932+ pax_open_kernel();
10933 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10934+ pax_close_kernel();
10935 }
10936
10937 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10938 {
10939+ pax_open_kernel();
10940 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10941+ pax_close_kernel();
10942 }
10943
10944 /*
10945diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10946index af6fd36..867ff74 100644
10947--- a/arch/x86/include/asm/pgtable.h
10948+++ b/arch/x86/include/asm/pgtable.h
10949@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10950
10951 #ifndef __PAGETABLE_PUD_FOLDED
10952 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10953+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10954 #define pgd_clear(pgd) native_pgd_clear(pgd)
10955 #endif
10956
10957@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10958
10959 #define arch_end_context_switch(prev) do {} while(0)
10960
10961+#define pax_open_kernel() native_pax_open_kernel()
10962+#define pax_close_kernel() native_pax_close_kernel()
10963 #endif /* CONFIG_PARAVIRT */
10964
10965+#define __HAVE_ARCH_PAX_OPEN_KERNEL
10966+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10967+
10968+#ifdef CONFIG_PAX_KERNEXEC
10969+static inline unsigned long native_pax_open_kernel(void)
10970+{
10971+ unsigned long cr0;
10972+
10973+ preempt_disable();
10974+ barrier();
10975+ cr0 = read_cr0() ^ X86_CR0_WP;
10976+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
10977+ write_cr0(cr0);
10978+ return cr0 ^ X86_CR0_WP;
10979+}
10980+
10981+static inline unsigned long native_pax_close_kernel(void)
10982+{
10983+ unsigned long cr0;
10984+
10985+ cr0 = read_cr0() ^ X86_CR0_WP;
10986+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10987+ write_cr0(cr0);
10988+ barrier();
10989+ preempt_enable_no_resched();
10990+ return cr0 ^ X86_CR0_WP;
10991+}
10992+#else
10993+static inline unsigned long native_pax_open_kernel(void) { return 0; }
10994+static inline unsigned long native_pax_close_kernel(void) { return 0; }
10995+#endif
10996+
10997 /*
10998 * The following only work if pte_present() is true.
10999 * Undefined behaviour if not..
11000 */
11001+static inline int pte_user(pte_t pte)
11002+{
11003+ return pte_val(pte) & _PAGE_USER;
11004+}
11005+
11006 static inline int pte_dirty(pte_t pte)
11007 {
11008 return pte_flags(pte) & _PAGE_DIRTY;
11009@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11010 return pte_clear_flags(pte, _PAGE_RW);
11011 }
11012
11013+static inline pte_t pte_mkread(pte_t pte)
11014+{
11015+ return __pte(pte_val(pte) | _PAGE_USER);
11016+}
11017+
11018 static inline pte_t pte_mkexec(pte_t pte)
11019 {
11020- return pte_clear_flags(pte, _PAGE_NX);
11021+#ifdef CONFIG_X86_PAE
11022+ if (__supported_pte_mask & _PAGE_NX)
11023+ return pte_clear_flags(pte, _PAGE_NX);
11024+ else
11025+#endif
11026+ return pte_set_flags(pte, _PAGE_USER);
11027+}
11028+
11029+static inline pte_t pte_exprotect(pte_t pte)
11030+{
11031+#ifdef CONFIG_X86_PAE
11032+ if (__supported_pte_mask & _PAGE_NX)
11033+ return pte_set_flags(pte, _PAGE_NX);
11034+ else
11035+#endif
11036+ return pte_clear_flags(pte, _PAGE_USER);
11037 }
11038
11039 static inline pte_t pte_mkdirty(pte_t pte)
11040@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11041 #endif
11042
11043 #ifndef __ASSEMBLY__
11044+
11045+#ifdef CONFIG_PAX_PER_CPU_PGD
11046+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11047+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11048+{
11049+ return cpu_pgd[cpu];
11050+}
11051+#endif
11052+
11053 #include <linux/mm_types.h>
11054
11055 static inline int pte_none(pte_t pte)
11056@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11057
11058 static inline int pgd_bad(pgd_t pgd)
11059 {
11060- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11061+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11062 }
11063
11064 static inline int pgd_none(pgd_t pgd)
11065@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11066 * pgd_offset() returns a (pgd_t *)
11067 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11068 */
11069-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11070+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11071+
11072+#ifdef CONFIG_PAX_PER_CPU_PGD
11073+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11074+#endif
11075+
11076 /*
11077 * a shortcut which implies the use of the kernel's pgd, instead
11078 * of a process's
11079@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11080 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11081 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11082
11083+#ifdef CONFIG_X86_32
11084+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11085+#else
11086+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11087+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11088+
11089+#ifdef CONFIG_PAX_MEMORY_UDEREF
11090+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11091+#else
11092+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11093+#endif
11094+
11095+#endif
11096+
11097 #ifndef __ASSEMBLY__
11098
11099 extern int direct_gbpages;
11100@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11101 * dst and src can be on the same page, but the range must not overlap,
11102 * and must not cross a page boundary.
11103 */
11104-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11105+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11106 {
11107- memcpy(dst, src, count * sizeof(pgd_t));
11108+ pax_open_kernel();
11109+ while (count--)
11110+ *dst++ = *src++;
11111+ pax_close_kernel();
11112 }
11113
11114+#ifdef CONFIG_PAX_PER_CPU_PGD
11115+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11116+#endif
11117+
11118+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11119+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11120+#else
11121+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11122+#endif
11123
11124 #include <asm-generic/pgtable.h>
11125 #endif /* __ASSEMBLY__ */
11126diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11127index 750f1bf..971e839 100644
11128--- a/arch/x86/include/asm/pgtable_32.h
11129+++ b/arch/x86/include/asm/pgtable_32.h
11130@@ -26,9 +26,6 @@
11131 struct mm_struct;
11132 struct vm_area_struct;
11133
11134-extern pgd_t swapper_pg_dir[1024];
11135-extern pgd_t trampoline_pg_dir[1024];
11136-
11137 static inline void pgtable_cache_init(void) { }
11138 static inline void check_pgt_cache(void) { }
11139 void paging_init(void);
11140@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11141 # include <asm/pgtable-2level.h>
11142 #endif
11143
11144+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11145+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11146+#ifdef CONFIG_X86_PAE
11147+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11148+#endif
11149+
11150 #if defined(CONFIG_HIGHPTE)
11151 #define __KM_PTE \
11152 (in_nmi() ? KM_NMI_PTE : \
11153@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11154 /* Clear a kernel PTE and flush it from the TLB */
11155 #define kpte_clear_flush(ptep, vaddr) \
11156 do { \
11157+ pax_open_kernel(); \
11158 pte_clear(&init_mm, (vaddr), (ptep)); \
11159+ pax_close_kernel(); \
11160 __flush_tlb_one((vaddr)); \
11161 } while (0)
11162
11163@@ -85,6 +90,9 @@ do { \
11164
11165 #endif /* !__ASSEMBLY__ */
11166
11167+#define HAVE_ARCH_UNMAPPED_AREA
11168+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11169+
11170 /*
11171 * kern_addr_valid() is (1) for FLATMEM and (0) for
11172 * SPARSEMEM and DISCONTIGMEM
11173diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11174index 5e67c15..12d5c47 100644
11175--- a/arch/x86/include/asm/pgtable_32_types.h
11176+++ b/arch/x86/include/asm/pgtable_32_types.h
11177@@ -8,7 +8,7 @@
11178 */
11179 #ifdef CONFIG_X86_PAE
11180 # include <asm/pgtable-3level_types.h>
11181-# define PMD_SIZE (1UL << PMD_SHIFT)
11182+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11183 # define PMD_MASK (~(PMD_SIZE - 1))
11184 #else
11185 # include <asm/pgtable-2level_types.h>
11186@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11187 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11188 #endif
11189
11190+#ifdef CONFIG_PAX_KERNEXEC
11191+#ifndef __ASSEMBLY__
11192+extern unsigned char MODULES_EXEC_VADDR[];
11193+extern unsigned char MODULES_EXEC_END[];
11194+#endif
11195+#include <asm/boot.h>
11196+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11197+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11198+#else
11199+#define ktla_ktva(addr) (addr)
11200+#define ktva_ktla(addr) (addr)
11201+#endif
11202+
11203 #define MODULES_VADDR VMALLOC_START
11204 #define MODULES_END VMALLOC_END
11205 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11206diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11207index c57a301..312bdb4 100644
11208--- a/arch/x86/include/asm/pgtable_64.h
11209+++ b/arch/x86/include/asm/pgtable_64.h
11210@@ -16,10 +16,13 @@
11211
11212 extern pud_t level3_kernel_pgt[512];
11213 extern pud_t level3_ident_pgt[512];
11214+extern pud_t level3_vmalloc_pgt[512];
11215+extern pud_t level3_vmemmap_pgt[512];
11216+extern pud_t level2_vmemmap_pgt[512];
11217 extern pmd_t level2_kernel_pgt[512];
11218 extern pmd_t level2_fixmap_pgt[512];
11219-extern pmd_t level2_ident_pgt[512];
11220-extern pgd_t init_level4_pgt[];
11221+extern pmd_t level2_ident_pgt[512*2];
11222+extern pgd_t init_level4_pgt[512];
11223
11224 #define swapper_pg_dir init_level4_pgt
11225
11226@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11227
11228 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11229 {
11230+ pax_open_kernel();
11231 *pmdp = pmd;
11232+ pax_close_kernel();
11233 }
11234
11235 static inline void native_pmd_clear(pmd_t *pmd)
11236@@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_t *pud)
11237
11238 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11239 {
11240+ pax_open_kernel();
11241+ *pgdp = pgd;
11242+ pax_close_kernel();
11243+}
11244+
11245+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11246+{
11247 *pgdp = pgd;
11248 }
11249
11250diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11251index 766ea16..5b96cb3 100644
11252--- a/arch/x86/include/asm/pgtable_64_types.h
11253+++ b/arch/x86/include/asm/pgtable_64_types.h
11254@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11255 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11256 #define MODULES_END _AC(0xffffffffff000000, UL)
11257 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11258+#define MODULES_EXEC_VADDR MODULES_VADDR
11259+#define MODULES_EXEC_END MODULES_END
11260+
11261+#define ktla_ktva(addr) (addr)
11262+#define ktva_ktla(addr) (addr)
11263
11264 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11265diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11266index d1f4a76..2f46ba1 100644
11267--- a/arch/x86/include/asm/pgtable_types.h
11268+++ b/arch/x86/include/asm/pgtable_types.h
11269@@ -16,12 +16,11 @@
11270 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11271 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11272 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11273-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11274+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11275 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11276 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11277 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11278-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11279-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11280+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11281 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11282
11283 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11284@@ -39,7 +38,6 @@
11285 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11286 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11287 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11288-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11289 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11290 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11291 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11292@@ -55,8 +53,10 @@
11293
11294 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11295 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11296-#else
11297+#elif defined(CONFIG_KMEMCHECK)
11298 #define _PAGE_NX (_AT(pteval_t, 0))
11299+#else
11300+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11301 #endif
11302
11303 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11304@@ -93,6 +93,9 @@
11305 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11306 _PAGE_ACCESSED)
11307
11308+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11309+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11310+
11311 #define __PAGE_KERNEL_EXEC \
11312 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11313 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11314@@ -103,8 +106,8 @@
11315 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11316 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11317 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11318-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11319-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11320+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11321+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11322 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11323 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11324 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11325@@ -163,8 +166,8 @@
11326 * bits are combined, this will alow user to access the high address mapped
11327 * VDSO in the presence of CONFIG_COMPAT_VDSO
11328 */
11329-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11330-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11331+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11332+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11333 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11334 #endif
11335
11336@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11337 {
11338 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11339 }
11340+#endif
11341
11342+#if PAGETABLE_LEVELS == 3
11343+#include <asm-generic/pgtable-nopud.h>
11344+#endif
11345+
11346+#if PAGETABLE_LEVELS == 2
11347+#include <asm-generic/pgtable-nopmd.h>
11348+#endif
11349+
11350+#ifndef __ASSEMBLY__
11351 #if PAGETABLE_LEVELS > 3
11352 typedef struct { pudval_t pud; } pud_t;
11353
11354@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11355 return pud.pud;
11356 }
11357 #else
11358-#include <asm-generic/pgtable-nopud.h>
11359-
11360 static inline pudval_t native_pud_val(pud_t pud)
11361 {
11362 return native_pgd_val(pud.pgd);
11363@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11364 return pmd.pmd;
11365 }
11366 #else
11367-#include <asm-generic/pgtable-nopmd.h>
11368-
11369 static inline pmdval_t native_pmd_val(pmd_t pmd)
11370 {
11371 return native_pgd_val(pmd.pud.pgd);
11372@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11373
11374 extern pteval_t __supported_pte_mask;
11375 extern void set_nx(void);
11376+
11377+#ifdef CONFIG_X86_32
11378+#ifdef CONFIG_X86_PAE
11379 extern int nx_enabled;
11380+#else
11381+#define nx_enabled (0)
11382+#endif
11383+#else
11384+#define nx_enabled (1)
11385+#endif
11386
11387 #define pgprot_writecombine pgprot_writecombine
11388 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11389diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11390index fa04dea..5f823fc 100644
11391--- a/arch/x86/include/asm/processor.h
11392+++ b/arch/x86/include/asm/processor.h
11393@@ -272,7 +272,7 @@ struct tss_struct {
11394
11395 } ____cacheline_aligned;
11396
11397-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11398+extern struct tss_struct init_tss[NR_CPUS];
11399
11400 /*
11401 * Save the original ist values for checking stack pointers during debugging
11402@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11403 */
11404 #define TASK_SIZE PAGE_OFFSET
11405 #define TASK_SIZE_MAX TASK_SIZE
11406+
11407+#ifdef CONFIG_PAX_SEGMEXEC
11408+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11409+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11410+#else
11411 #define STACK_TOP TASK_SIZE
11412-#define STACK_TOP_MAX STACK_TOP
11413+#endif
11414+
11415+#define STACK_TOP_MAX TASK_SIZE
11416
11417 #define INIT_THREAD { \
11418- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11419+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11420 .vm86_info = NULL, \
11421 .sysenter_cs = __KERNEL_CS, \
11422 .io_bitmap_ptr = NULL, \
11423@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11424 */
11425 #define INIT_TSS { \
11426 .x86_tss = { \
11427- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11428+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11429 .ss0 = __KERNEL_DS, \
11430 .ss1 = __KERNEL_CS, \
11431 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11432@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11433 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11434
11435 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11436-#define KSTK_TOP(info) \
11437-({ \
11438- unsigned long *__ptr = (unsigned long *)(info); \
11439- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11440-})
11441+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11442
11443 /*
11444 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11445@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11446 #define task_pt_regs(task) \
11447 ({ \
11448 struct pt_regs *__regs__; \
11449- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11450+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11451 __regs__ - 1; \
11452 })
11453
11454@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11455 /*
11456 * User space process size. 47bits minus one guard page.
11457 */
11458-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11459+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11460
11461 /* This decides where the kernel will search for a free chunk of vm
11462 * space during mmap's.
11463 */
11464 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11465- 0xc0000000 : 0xFFFFe000)
11466+ 0xc0000000 : 0xFFFFf000)
11467
11468 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11469 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11470@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11471 #define STACK_TOP_MAX TASK_SIZE_MAX
11472
11473 #define INIT_THREAD { \
11474- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11475+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11476 }
11477
11478 #define INIT_TSS { \
11479- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11480+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11481 }
11482
11483 /*
11484@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11485 */
11486 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11487
11488+#ifdef CONFIG_PAX_SEGMEXEC
11489+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11490+#endif
11491+
11492 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11493
11494 /* Get/set a process' ability to use the timestamp counter instruction */
11495diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11496index 0f0d908..f2e3da2 100644
11497--- a/arch/x86/include/asm/ptrace.h
11498+++ b/arch/x86/include/asm/ptrace.h
11499@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11500 }
11501
11502 /*
11503- * user_mode_vm(regs) determines whether a register set came from user mode.
11504+ * user_mode(regs) determines whether a register set came from user mode.
11505 * This is true if V8086 mode was enabled OR if the register set was from
11506 * protected mode with RPL-3 CS value. This tricky test checks that with
11507 * one comparison. Many places in the kernel can bypass this full check
11508- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11509+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11510+ * be used.
11511 */
11512-static inline int user_mode(struct pt_regs *regs)
11513+static inline int user_mode_novm(struct pt_regs *regs)
11514 {
11515 #ifdef CONFIG_X86_32
11516 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11517 #else
11518- return !!(regs->cs & 3);
11519+ return !!(regs->cs & SEGMENT_RPL_MASK);
11520 #endif
11521 }
11522
11523-static inline int user_mode_vm(struct pt_regs *regs)
11524+static inline int user_mode(struct pt_regs *regs)
11525 {
11526 #ifdef CONFIG_X86_32
11527 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11528 USER_RPL;
11529 #else
11530- return user_mode(regs);
11531+ return user_mode_novm(regs);
11532 #endif
11533 }
11534
11535diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11536index 562d4fd..6e39df1 100644
11537--- a/arch/x86/include/asm/reboot.h
11538+++ b/arch/x86/include/asm/reboot.h
11539@@ -6,19 +6,19 @@
11540 struct pt_regs;
11541
11542 struct machine_ops {
11543- void (*restart)(char *cmd);
11544- void (*halt)(void);
11545- void (*power_off)(void);
11546+ void (* __noreturn restart)(char *cmd);
11547+ void (* __noreturn halt)(void);
11548+ void (* __noreturn power_off)(void);
11549 void (*shutdown)(void);
11550 void (*crash_shutdown)(struct pt_regs *);
11551- void (*emergency_restart)(void);
11552-};
11553+ void (* __noreturn emergency_restart)(void);
11554+} __no_const;
11555
11556 extern struct machine_ops machine_ops;
11557
11558 void native_machine_crash_shutdown(struct pt_regs *regs);
11559 void native_machine_shutdown(void);
11560-void machine_real_restart(const unsigned char *code, int length);
11561+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11562
11563 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11564 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11565diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11566index 606ede1..dbfff37 100644
11567--- a/arch/x86/include/asm/rwsem.h
11568+++ b/arch/x86/include/asm/rwsem.h
11569@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11570 {
11571 asm volatile("# beginning down_read\n\t"
11572 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11573+
11574+#ifdef CONFIG_PAX_REFCOUNT
11575+ "jno 0f\n"
11576+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11577+ "int $4\n0:\n"
11578+ _ASM_EXTABLE(0b, 0b)
11579+#endif
11580+
11581 /* adds 0x00000001, returns the old value */
11582 " jns 1f\n"
11583 " call call_rwsem_down_read_failed\n"
11584@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11585 "1:\n\t"
11586 " mov %1,%2\n\t"
11587 " add %3,%2\n\t"
11588+
11589+#ifdef CONFIG_PAX_REFCOUNT
11590+ "jno 0f\n"
11591+ "sub %3,%2\n"
11592+ "int $4\n0:\n"
11593+ _ASM_EXTABLE(0b, 0b)
11594+#endif
11595+
11596 " jle 2f\n\t"
11597 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11598 " jnz 1b\n\t"
11599@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11600 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11601 asm volatile("# beginning down_write\n\t"
11602 LOCK_PREFIX " xadd %1,(%2)\n\t"
11603+
11604+#ifdef CONFIG_PAX_REFCOUNT
11605+ "jno 0f\n"
11606+ "mov %1,(%2)\n"
11607+ "int $4\n0:\n"
11608+ _ASM_EXTABLE(0b, 0b)
11609+#endif
11610+
11611 /* subtract 0x0000ffff, returns the old value */
11612 " test %1,%1\n\t"
11613 /* was the count 0 before? */
11614@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11615 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11616 asm volatile("# beginning __up_read\n\t"
11617 LOCK_PREFIX " xadd %1,(%2)\n\t"
11618+
11619+#ifdef CONFIG_PAX_REFCOUNT
11620+ "jno 0f\n"
11621+ "mov %1,(%2)\n"
11622+ "int $4\n0:\n"
11623+ _ASM_EXTABLE(0b, 0b)
11624+#endif
11625+
11626 /* subtracts 1, returns the old value */
11627 " jns 1f\n\t"
11628 " call call_rwsem_wake\n"
11629@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11630 rwsem_count_t tmp;
11631 asm volatile("# beginning __up_write\n\t"
11632 LOCK_PREFIX " xadd %1,(%2)\n\t"
11633+
11634+#ifdef CONFIG_PAX_REFCOUNT
11635+ "jno 0f\n"
11636+ "mov %1,(%2)\n"
11637+ "int $4\n0:\n"
11638+ _ASM_EXTABLE(0b, 0b)
11639+#endif
11640+
11641 /* tries to transition
11642 0xffff0001 -> 0x00000000 */
11643 " jz 1f\n"
11644@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11645 {
11646 asm volatile("# beginning __downgrade_write\n\t"
11647 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11648+
11649+#ifdef CONFIG_PAX_REFCOUNT
11650+ "jno 0f\n"
11651+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11652+ "int $4\n0:\n"
11653+ _ASM_EXTABLE(0b, 0b)
11654+#endif
11655+
11656 /*
11657 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11658 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11659@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11660 static inline void rwsem_atomic_add(rwsem_count_t delta,
11661 struct rw_semaphore *sem)
11662 {
11663- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11664+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11665+
11666+#ifdef CONFIG_PAX_REFCOUNT
11667+ "jno 0f\n"
11668+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11669+ "int $4\n0:\n"
11670+ _ASM_EXTABLE(0b, 0b)
11671+#endif
11672+
11673 : "+m" (sem->count)
11674 : "er" (delta));
11675 }
11676@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11677 {
11678 rwsem_count_t tmp = delta;
11679
11680- asm volatile(LOCK_PREFIX "xadd %0,%1"
11681+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11682+
11683+#ifdef CONFIG_PAX_REFCOUNT
11684+ "jno 0f\n"
11685+ "mov %0,%1\n"
11686+ "int $4\n0:\n"
11687+ _ASM_EXTABLE(0b, 0b)
11688+#endif
11689+
11690 : "+r" (tmp), "+m" (sem->count)
11691 : : "memory");
11692
11693diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11694index 14e0ed8..7f7dd5e 100644
11695--- a/arch/x86/include/asm/segment.h
11696+++ b/arch/x86/include/asm/segment.h
11697@@ -62,10 +62,15 @@
11698 * 26 - ESPFIX small SS
11699 * 27 - per-cpu [ offset to per-cpu data area ]
11700 * 28 - stack_canary-20 [ for stack protector ]
11701- * 29 - unused
11702- * 30 - unused
11703+ * 29 - PCI BIOS CS
11704+ * 30 - PCI BIOS DS
11705 * 31 - TSS for double fault handler
11706 */
11707+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11708+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11709+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11710+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11711+
11712 #define GDT_ENTRY_TLS_MIN 6
11713 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11714
11715@@ -77,6 +82,8 @@
11716
11717 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11718
11719+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11720+
11721 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11722
11723 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11724@@ -88,7 +95,7 @@
11725 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11726 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11727
11728-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11729+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11730 #ifdef CONFIG_SMP
11731 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11732 #else
11733@@ -102,6 +109,12 @@
11734 #define __KERNEL_STACK_CANARY 0
11735 #endif
11736
11737+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11738+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11739+
11740+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11741+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11742+
11743 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11744
11745 /*
11746@@ -139,7 +152,7 @@
11747 */
11748
11749 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11750-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11751+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11752
11753
11754 #else
11755@@ -163,6 +176,8 @@
11756 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11757 #define __USER32_DS __USER_DS
11758
11759+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11760+
11761 #define GDT_ENTRY_TSS 8 /* needs two entries */
11762 #define GDT_ENTRY_LDT 10 /* needs two entries */
11763 #define GDT_ENTRY_TLS_MIN 12
11764@@ -183,6 +198,7 @@
11765 #endif
11766
11767 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11768+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11769 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11770 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11771 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11772diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11773index 4c2f63c..5685db2 100644
11774--- a/arch/x86/include/asm/smp.h
11775+++ b/arch/x86/include/asm/smp.h
11776@@ -24,7 +24,7 @@ extern unsigned int num_processors;
11777 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11778 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11779 DECLARE_PER_CPU(u16, cpu_llc_id);
11780-DECLARE_PER_CPU(int, cpu_number);
11781+DECLARE_PER_CPU(unsigned int, cpu_number);
11782
11783 static inline struct cpumask *cpu_sibling_mask(int cpu)
11784 {
11785@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11786 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11787
11788 /* Static state in head.S used to set up a CPU */
11789-extern struct {
11790- void *sp;
11791- unsigned short ss;
11792-} stack_start;
11793+extern unsigned long stack_start; /* Initial stack pointer address */
11794
11795 struct smp_ops {
11796 void (*smp_prepare_boot_cpu)(void);
11797@@ -60,7 +57,7 @@ struct smp_ops {
11798
11799 void (*send_call_func_ipi)(const struct cpumask *mask);
11800 void (*send_call_func_single_ipi)(int cpu);
11801-};
11802+} __no_const;
11803
11804 /* Globals due to paravirt */
11805 extern void set_cpu_sibling_map(int cpu);
11806@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11807 extern int safe_smp_processor_id(void);
11808
11809 #elif defined(CONFIG_X86_64_SMP)
11810-#define raw_smp_processor_id() (percpu_read(cpu_number))
11811-
11812-#define stack_smp_processor_id() \
11813-({ \
11814- struct thread_info *ti; \
11815- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11816- ti->cpu; \
11817-})
11818+#define raw_smp_processor_id() (percpu_read(cpu_number))
11819+#define stack_smp_processor_id() raw_smp_processor_id()
11820 #define safe_smp_processor_id() smp_processor_id()
11821
11822 #endif
11823diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11824index 4e77853..4359783 100644
11825--- a/arch/x86/include/asm/spinlock.h
11826+++ b/arch/x86/include/asm/spinlock.h
11827@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11828 static inline void __raw_read_lock(raw_rwlock_t *rw)
11829 {
11830 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11831+
11832+#ifdef CONFIG_PAX_REFCOUNT
11833+ "jno 0f\n"
11834+ LOCK_PREFIX " addl $1,(%0)\n"
11835+ "int $4\n0:\n"
11836+ _ASM_EXTABLE(0b, 0b)
11837+#endif
11838+
11839 "jns 1f\n"
11840 "call __read_lock_failed\n\t"
11841 "1:\n"
11842@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11843 static inline void __raw_write_lock(raw_rwlock_t *rw)
11844 {
11845 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11846+
11847+#ifdef CONFIG_PAX_REFCOUNT
11848+ "jno 0f\n"
11849+ LOCK_PREFIX " addl %1,(%0)\n"
11850+ "int $4\n0:\n"
11851+ _ASM_EXTABLE(0b, 0b)
11852+#endif
11853+
11854 "jz 1f\n"
11855 "call __write_lock_failed\n\t"
11856 "1:\n"
11857@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11858
11859 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11860 {
11861- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11862+ asm volatile(LOCK_PREFIX "incl %0\n"
11863+
11864+#ifdef CONFIG_PAX_REFCOUNT
11865+ "jno 0f\n"
11866+ LOCK_PREFIX "decl %0\n"
11867+ "int $4\n0:\n"
11868+ _ASM_EXTABLE(0b, 0b)
11869+#endif
11870+
11871+ :"+m" (rw->lock) : : "memory");
11872 }
11873
11874 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11875 {
11876- asm volatile(LOCK_PREFIX "addl %1, %0"
11877+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
11878+
11879+#ifdef CONFIG_PAX_REFCOUNT
11880+ "jno 0f\n"
11881+ LOCK_PREFIX "subl %1, %0\n"
11882+ "int $4\n0:\n"
11883+ _ASM_EXTABLE(0b, 0b)
11884+#endif
11885+
11886 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11887 }
11888
11889diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11890index 1575177..cb23f52 100644
11891--- a/arch/x86/include/asm/stackprotector.h
11892+++ b/arch/x86/include/asm/stackprotector.h
11893@@ -48,7 +48,7 @@
11894 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11895 */
11896 #define GDT_STACK_CANARY_INIT \
11897- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11898+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11899
11900 /*
11901 * Initialize the stackprotector canary value.
11902@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11903
11904 static inline void load_stack_canary_segment(void)
11905 {
11906-#ifdef CONFIG_X86_32
11907+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11908 asm volatile ("mov %0, %%gs" : : "r" (0));
11909 #endif
11910 }
11911diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11912index e0fbf29..858ef4a 100644
11913--- a/arch/x86/include/asm/system.h
11914+++ b/arch/x86/include/asm/system.h
11915@@ -132,7 +132,7 @@ do { \
11916 "thread_return:\n\t" \
11917 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11918 __switch_canary \
11919- "movq %P[thread_info](%%rsi),%%r8\n\t" \
11920+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11921 "movq %%rax,%%rdi\n\t" \
11922 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11923 "jnz ret_from_fork\n\t" \
11924@@ -143,7 +143,7 @@ do { \
11925 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11926 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11927 [_tif_fork] "i" (_TIF_FORK), \
11928- [thread_info] "i" (offsetof(struct task_struct, stack)), \
11929+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
11930 [current_task] "m" (per_cpu_var(current_task)) \
11931 __switch_canary_iparam \
11932 : "memory", "cc" __EXTRA_CLOBBER)
11933@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11934 {
11935 unsigned long __limit;
11936 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11937- return __limit + 1;
11938+ return __limit;
11939 }
11940
11941 static inline void native_clts(void)
11942@@ -340,12 +340,12 @@ void enable_hlt(void);
11943
11944 void cpu_idle_wait(void);
11945
11946-extern unsigned long arch_align_stack(unsigned long sp);
11947+#define arch_align_stack(x) ((x) & ~0xfUL)
11948 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11949
11950 void default_idle(void);
11951
11952-void stop_this_cpu(void *dummy);
11953+void stop_this_cpu(void *dummy) __noreturn;
11954
11955 /*
11956 * Force strict CPU ordering.
11957diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11958index 19c3ce4..8962535 100644
11959--- a/arch/x86/include/asm/thread_info.h
11960+++ b/arch/x86/include/asm/thread_info.h
11961@@ -10,6 +10,7 @@
11962 #include <linux/compiler.h>
11963 #include <asm/page.h>
11964 #include <asm/types.h>
11965+#include <asm/percpu.h>
11966
11967 /*
11968 * low level task data that entry.S needs immediate access to
11969@@ -24,7 +25,6 @@ struct exec_domain;
11970 #include <asm/atomic.h>
11971
11972 struct thread_info {
11973- struct task_struct *task; /* main task structure */
11974 struct exec_domain *exec_domain; /* execution domain */
11975 __u32 flags; /* low level flags */
11976 __u32 status; /* thread synchronous flags */
11977@@ -34,18 +34,12 @@ struct thread_info {
11978 mm_segment_t addr_limit;
11979 struct restart_block restart_block;
11980 void __user *sysenter_return;
11981-#ifdef CONFIG_X86_32
11982- unsigned long previous_esp; /* ESP of the previous stack in
11983- case of nested (IRQ) stacks
11984- */
11985- __u8 supervisor_stack[0];
11986-#endif
11987+ unsigned long lowest_stack;
11988 int uaccess_err;
11989 };
11990
11991-#define INIT_THREAD_INFO(tsk) \
11992+#define INIT_THREAD_INFO \
11993 { \
11994- .task = &tsk, \
11995 .exec_domain = &default_exec_domain, \
11996 .flags = 0, \
11997 .cpu = 0, \
11998@@ -56,7 +50,7 @@ struct thread_info {
11999 }, \
12000 }
12001
12002-#define init_thread_info (init_thread_union.thread_info)
12003+#define init_thread_info (init_thread_union.stack)
12004 #define init_stack (init_thread_union.stack)
12005
12006 #else /* !__ASSEMBLY__ */
12007@@ -163,6 +157,23 @@ struct thread_info {
12008 #define alloc_thread_info(tsk) \
12009 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12010
12011+#ifdef __ASSEMBLY__
12012+/* how to get the thread information struct from ASM */
12013+#define GET_THREAD_INFO(reg) \
12014+ mov PER_CPU_VAR(current_tinfo), reg
12015+
12016+/* use this one if reg already contains %esp */
12017+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12018+#else
12019+/* how to get the thread information struct from C */
12020+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12021+
12022+static __always_inline struct thread_info *current_thread_info(void)
12023+{
12024+ return percpu_read_stable(current_tinfo);
12025+}
12026+#endif
12027+
12028 #ifdef CONFIG_X86_32
12029
12030 #define STACK_WARN (THREAD_SIZE/8)
12031@@ -173,35 +184,13 @@ struct thread_info {
12032 */
12033 #ifndef __ASSEMBLY__
12034
12035-
12036 /* how to get the current stack pointer from C */
12037 register unsigned long current_stack_pointer asm("esp") __used;
12038
12039-/* how to get the thread information struct from C */
12040-static inline struct thread_info *current_thread_info(void)
12041-{
12042- return (struct thread_info *)
12043- (current_stack_pointer & ~(THREAD_SIZE - 1));
12044-}
12045-
12046-#else /* !__ASSEMBLY__ */
12047-
12048-/* how to get the thread information struct from ASM */
12049-#define GET_THREAD_INFO(reg) \
12050- movl $-THREAD_SIZE, reg; \
12051- andl %esp, reg
12052-
12053-/* use this one if reg already contains %esp */
12054-#define GET_THREAD_INFO_WITH_ESP(reg) \
12055- andl $-THREAD_SIZE, reg
12056-
12057 #endif
12058
12059 #else /* X86_32 */
12060
12061-#include <asm/percpu.h>
12062-#define KERNEL_STACK_OFFSET (5*8)
12063-
12064 /*
12065 * macros/functions for gaining access to the thread information structure
12066 * preempt_count needs to be 1 initially, until the scheduler is functional.
12067@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12068 #ifndef __ASSEMBLY__
12069 DECLARE_PER_CPU(unsigned long, kernel_stack);
12070
12071-static inline struct thread_info *current_thread_info(void)
12072-{
12073- struct thread_info *ti;
12074- ti = (void *)(percpu_read_stable(kernel_stack) +
12075- KERNEL_STACK_OFFSET - THREAD_SIZE);
12076- return ti;
12077-}
12078-
12079-#else /* !__ASSEMBLY__ */
12080-
12081-/* how to get the thread information struct from ASM */
12082-#define GET_THREAD_INFO(reg) \
12083- movq PER_CPU_VAR(kernel_stack),reg ; \
12084- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12085-
12086+/* how to get the current stack pointer from C */
12087+register unsigned long current_stack_pointer asm("rsp") __used;
12088 #endif
12089
12090 #endif /* !X86_32 */
12091@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12092 extern void free_thread_info(struct thread_info *ti);
12093 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12094 #define arch_task_cache_init arch_task_cache_init
12095+
12096+#define __HAVE_THREAD_FUNCTIONS
12097+#define task_thread_info(task) (&(task)->tinfo)
12098+#define task_stack_page(task) ((task)->stack)
12099+#define setup_thread_stack(p, org) do {} while (0)
12100+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12101+
12102+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12103+extern struct task_struct *alloc_task_struct(void);
12104+extern void free_task_struct(struct task_struct *);
12105+
12106 #endif
12107 #endif /* _ASM_X86_THREAD_INFO_H */
12108diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12109index 61c5874..8a046e9 100644
12110--- a/arch/x86/include/asm/uaccess.h
12111+++ b/arch/x86/include/asm/uaccess.h
12112@@ -8,12 +8,15 @@
12113 #include <linux/thread_info.h>
12114 #include <linux/prefetch.h>
12115 #include <linux/string.h>
12116+#include <linux/sched.h>
12117 #include <asm/asm.h>
12118 #include <asm/page.h>
12119
12120 #define VERIFY_READ 0
12121 #define VERIFY_WRITE 1
12122
12123+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12124+
12125 /*
12126 * The fs value determines whether argument validity checking should be
12127 * performed or not. If get_fs() == USER_DS, checking is performed, with
12128@@ -29,7 +32,12 @@
12129
12130 #define get_ds() (KERNEL_DS)
12131 #define get_fs() (current_thread_info()->addr_limit)
12132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12133+void __set_fs(mm_segment_t x);
12134+void set_fs(mm_segment_t x);
12135+#else
12136 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12137+#endif
12138
12139 #define segment_eq(a, b) ((a).seg == (b).seg)
12140
12141@@ -77,7 +85,33 @@
12142 * checks that the pointer is in the user space range - after calling
12143 * this function, memory access functions may still return -EFAULT.
12144 */
12145-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12146+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12147+#define access_ok(type, addr, size) \
12148+({ \
12149+ long __size = size; \
12150+ unsigned long __addr = (unsigned long)addr; \
12151+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12152+ unsigned long __end_ao = __addr + __size - 1; \
12153+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12154+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12155+ while(__addr_ao <= __end_ao) { \
12156+ char __c_ao; \
12157+ __addr_ao += PAGE_SIZE; \
12158+ if (__size > PAGE_SIZE) \
12159+ cond_resched(); \
12160+ if (__get_user(__c_ao, (char __user *)__addr)) \
12161+ break; \
12162+ if (type != VERIFY_WRITE) { \
12163+ __addr = __addr_ao; \
12164+ continue; \
12165+ } \
12166+ if (__put_user(__c_ao, (char __user *)__addr)) \
12167+ break; \
12168+ __addr = __addr_ao; \
12169+ } \
12170+ } \
12171+ __ret_ao; \
12172+})
12173
12174 /*
12175 * The exception table consists of pairs of addresses: the first is the
12176@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12177 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12178 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12179
12180-
12181+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12182+#define __copyuser_seg "gs;"
12183+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12184+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12185+#else
12186+#define __copyuser_seg
12187+#define __COPYUSER_SET_ES
12188+#define __COPYUSER_RESTORE_ES
12189+#endif
12190
12191 #ifdef CONFIG_X86_32
12192 #define __put_user_asm_u64(x, addr, err, errret) \
12193- asm volatile("1: movl %%eax,0(%2)\n" \
12194- "2: movl %%edx,4(%2)\n" \
12195+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12196+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12197 "3:\n" \
12198 ".section .fixup,\"ax\"\n" \
12199 "4: movl %3,%0\n" \
12200@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12201 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12202
12203 #define __put_user_asm_ex_u64(x, addr) \
12204- asm volatile("1: movl %%eax,0(%1)\n" \
12205- "2: movl %%edx,4(%1)\n" \
12206+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12207+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12208 "3:\n" \
12209 _ASM_EXTABLE(1b, 2b - 1b) \
12210 _ASM_EXTABLE(2b, 3b - 2b) \
12211@@ -253,7 +295,7 @@ extern void __put_user_8(void);
12212 __typeof__(*(ptr)) __pu_val; \
12213 __chk_user_ptr(ptr); \
12214 might_fault(); \
12215- __pu_val = x; \
12216+ __pu_val = (x); \
12217 switch (sizeof(*(ptr))) { \
12218 case 1: \
12219 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12220@@ -374,7 +416,7 @@ do { \
12221 } while (0)
12222
12223 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12224- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12225+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12226 "2:\n" \
12227 ".section .fixup,\"ax\"\n" \
12228 "3: mov %3,%0\n" \
12229@@ -382,7 +424,7 @@ do { \
12230 " jmp 2b\n" \
12231 ".previous\n" \
12232 _ASM_EXTABLE(1b, 3b) \
12233- : "=r" (err), ltype(x) \
12234+ : "=r" (err), ltype (x) \
12235 : "m" (__m(addr)), "i" (errret), "0" (err))
12236
12237 #define __get_user_size_ex(x, ptr, size) \
12238@@ -407,7 +449,7 @@ do { \
12239 } while (0)
12240
12241 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12242- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12243+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12244 "2:\n" \
12245 _ASM_EXTABLE(1b, 2b - 1b) \
12246 : ltype(x) : "m" (__m(addr)))
12247@@ -424,13 +466,24 @@ do { \
12248 int __gu_err; \
12249 unsigned long __gu_val; \
12250 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12251- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12252+ (x) = (__typeof__(*(ptr)))__gu_val; \
12253 __gu_err; \
12254 })
12255
12256 /* FIXME: this hack is definitely wrong -AK */
12257 struct __large_struct { unsigned long buf[100]; };
12258-#define __m(x) (*(struct __large_struct __user *)(x))
12259+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12260+#define ____m(x) \
12261+({ \
12262+ unsigned long ____x = (unsigned long)(x); \
12263+ if (____x < PAX_USER_SHADOW_BASE) \
12264+ ____x += PAX_USER_SHADOW_BASE; \
12265+ (void __user *)____x; \
12266+})
12267+#else
12268+#define ____m(x) (x)
12269+#endif
12270+#define __m(x) (*(struct __large_struct __user *)____m(x))
12271
12272 /*
12273 * Tell gcc we read from memory instead of writing: this is because
12274@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12275 * aliasing issues.
12276 */
12277 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12278- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12279+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12280 "2:\n" \
12281 ".section .fixup,\"ax\"\n" \
12282 "3: mov %3,%0\n" \
12283@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12284 ".previous\n" \
12285 _ASM_EXTABLE(1b, 3b) \
12286 : "=r"(err) \
12287- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12288+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12289
12290 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12291- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12292+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12293 "2:\n" \
12294 _ASM_EXTABLE(1b, 2b - 1b) \
12295 : : ltype(x), "m" (__m(addr)))
12296@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12297 * On error, the variable @x is set to zero.
12298 */
12299
12300+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12301+#define __get_user(x, ptr) get_user((x), (ptr))
12302+#else
12303 #define __get_user(x, ptr) \
12304 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12305+#endif
12306
12307 /**
12308 * __put_user: - Write a simple value into user space, with less checking.
12309@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12310 * Returns zero on success, or -EFAULT on error.
12311 */
12312
12313+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12314+#define __put_user(x, ptr) put_user((x), (ptr))
12315+#else
12316 #define __put_user(x, ptr) \
12317 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12318+#endif
12319
12320 #define __get_user_unaligned __get_user
12321 #define __put_user_unaligned __put_user
12322@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12323 #define get_user_ex(x, ptr) do { \
12324 unsigned long __gue_val; \
12325 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12326- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12327+ (x) = (__typeof__(*(ptr)))__gue_val; \
12328 } while (0)
12329
12330 #ifdef CONFIG_X86_WP_WORKS_OK
12331@@ -567,6 +628,7 @@ extern struct movsl_mask {
12332
12333 #define ARCH_HAS_NOCACHE_UACCESS 1
12334
12335+#define ARCH_HAS_SORT_EXTABLE
12336 #ifdef CONFIG_X86_32
12337 # include "uaccess_32.h"
12338 #else
12339diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12340index 632fb44..e30e334 100644
12341--- a/arch/x86/include/asm/uaccess_32.h
12342+++ b/arch/x86/include/asm/uaccess_32.h
12343@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12344 static __always_inline unsigned long __must_check
12345 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12346 {
12347+ pax_track_stack();
12348+
12349+ if ((long)n < 0)
12350+ return n;
12351+
12352 if (__builtin_constant_p(n)) {
12353 unsigned long ret;
12354
12355@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12356 return ret;
12357 }
12358 }
12359+ if (!__builtin_constant_p(n))
12360+ check_object_size(from, n, true);
12361 return __copy_to_user_ll(to, from, n);
12362 }
12363
12364@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12365 __copy_to_user(void __user *to, const void *from, unsigned long n)
12366 {
12367 might_fault();
12368+
12369 return __copy_to_user_inatomic(to, from, n);
12370 }
12371
12372 static __always_inline unsigned long
12373 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12374 {
12375+ if ((long)n < 0)
12376+ return n;
12377+
12378 /* Avoid zeroing the tail if the copy fails..
12379 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12380 * but as the zeroing behaviour is only significant when n is not
12381@@ -138,6 +149,12 @@ static __always_inline unsigned long
12382 __copy_from_user(void *to, const void __user *from, unsigned long n)
12383 {
12384 might_fault();
12385+
12386+ pax_track_stack();
12387+
12388+ if ((long)n < 0)
12389+ return n;
12390+
12391 if (__builtin_constant_p(n)) {
12392 unsigned long ret;
12393
12394@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12395 return ret;
12396 }
12397 }
12398+ if (!__builtin_constant_p(n))
12399+ check_object_size(to, n, false);
12400 return __copy_from_user_ll(to, from, n);
12401 }
12402
12403@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12404 const void __user *from, unsigned long n)
12405 {
12406 might_fault();
12407+
12408+ if ((long)n < 0)
12409+ return n;
12410+
12411 if (__builtin_constant_p(n)) {
12412 unsigned long ret;
12413
12414@@ -182,14 +205,62 @@ static __always_inline unsigned long
12415 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12416 unsigned long n)
12417 {
12418- return __copy_from_user_ll_nocache_nozero(to, from, n);
12419+ if ((long)n < 0)
12420+ return n;
12421+
12422+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12423+}
12424+
12425+/**
12426+ * copy_to_user: - Copy a block of data into user space.
12427+ * @to: Destination address, in user space.
12428+ * @from: Source address, in kernel space.
12429+ * @n: Number of bytes to copy.
12430+ *
12431+ * Context: User context only. This function may sleep.
12432+ *
12433+ * Copy data from kernel space to user space.
12434+ *
12435+ * Returns number of bytes that could not be copied.
12436+ * On success, this will be zero.
12437+ */
12438+static __always_inline unsigned long __must_check
12439+copy_to_user(void __user *to, const void *from, unsigned long n)
12440+{
12441+ if (access_ok(VERIFY_WRITE, to, n))
12442+ n = __copy_to_user(to, from, n);
12443+ return n;
12444+}
12445+
12446+/**
12447+ * copy_from_user: - Copy a block of data from user space.
12448+ * @to: Destination address, in kernel space.
12449+ * @from: Source address, in user space.
12450+ * @n: Number of bytes to copy.
12451+ *
12452+ * Context: User context only. This function may sleep.
12453+ *
12454+ * Copy data from user space to kernel space.
12455+ *
12456+ * Returns number of bytes that could not be copied.
12457+ * On success, this will be zero.
12458+ *
12459+ * If some data could not be copied, this function will pad the copied
12460+ * data to the requested size using zero bytes.
12461+ */
12462+static __always_inline unsigned long __must_check
12463+copy_from_user(void *to, const void __user *from, unsigned long n)
12464+{
12465+ if (access_ok(VERIFY_READ, from, n))
12466+ n = __copy_from_user(to, from, n);
12467+ else if ((long)n > 0) {
12468+ if (!__builtin_constant_p(n))
12469+ check_object_size(to, n, false);
12470+ memset(to, 0, n);
12471+ }
12472+ return n;
12473 }
12474
12475-unsigned long __must_check copy_to_user(void __user *to,
12476- const void *from, unsigned long n);
12477-unsigned long __must_check copy_from_user(void *to,
12478- const void __user *from,
12479- unsigned long n);
12480 long __must_check strncpy_from_user(char *dst, const char __user *src,
12481 long count);
12482 long __must_check __strncpy_from_user(char *dst,
12483diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12484index db24b21..72a9dfc 100644
12485--- a/arch/x86/include/asm/uaccess_64.h
12486+++ b/arch/x86/include/asm/uaccess_64.h
12487@@ -9,6 +9,9 @@
12488 #include <linux/prefetch.h>
12489 #include <linux/lockdep.h>
12490 #include <asm/page.h>
12491+#include <asm/pgtable.h>
12492+
12493+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12494
12495 /*
12496 * Copy To/From Userspace
12497@@ -19,113 +22,203 @@ __must_check unsigned long
12498 copy_user_generic(void *to, const void *from, unsigned len);
12499
12500 __must_check unsigned long
12501-copy_to_user(void __user *to, const void *from, unsigned len);
12502-__must_check unsigned long
12503-copy_from_user(void *to, const void __user *from, unsigned len);
12504-__must_check unsigned long
12505 copy_in_user(void __user *to, const void __user *from, unsigned len);
12506
12507 static __always_inline __must_check
12508-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12509+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
12510 {
12511- int ret = 0;
12512+ unsigned ret = 0;
12513
12514 might_fault();
12515- if (!__builtin_constant_p(size))
12516- return copy_user_generic(dst, (__force void *)src, size);
12517+
12518+ if ((int)size < 0)
12519+ return size;
12520+
12521+#ifdef CONFIG_PAX_MEMORY_UDEREF
12522+ if (!__access_ok(VERIFY_READ, src, size))
12523+ return size;
12524+#endif
12525+
12526+ if (!__builtin_constant_p(size)) {
12527+ check_object_size(dst, size, false);
12528+
12529+#ifdef CONFIG_PAX_MEMORY_UDEREF
12530+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12531+ src += PAX_USER_SHADOW_BASE;
12532+#endif
12533+
12534+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12535+ }
12536 switch (size) {
12537- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12538+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12539 ret, "b", "b", "=q", 1);
12540 return ret;
12541- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12542+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12543 ret, "w", "w", "=r", 2);
12544 return ret;
12545- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12546+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12547 ret, "l", "k", "=r", 4);
12548 return ret;
12549- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12550+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12551 ret, "q", "", "=r", 8);
12552 return ret;
12553 case 10:
12554- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12555+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12556 ret, "q", "", "=r", 10);
12557 if (unlikely(ret))
12558 return ret;
12559 __get_user_asm(*(u16 *)(8 + (char *)dst),
12560- (u16 __user *)(8 + (char __user *)src),
12561+ (const u16 __user *)(8 + (const char __user *)src),
12562 ret, "w", "w", "=r", 2);
12563 return ret;
12564 case 16:
12565- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12566+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12567 ret, "q", "", "=r", 16);
12568 if (unlikely(ret))
12569 return ret;
12570 __get_user_asm(*(u64 *)(8 + (char *)dst),
12571- (u64 __user *)(8 + (char __user *)src),
12572+ (const u64 __user *)(8 + (const char __user *)src),
12573 ret, "q", "", "=r", 8);
12574 return ret;
12575 default:
12576- return copy_user_generic(dst, (__force void *)src, size);
12577+
12578+#ifdef CONFIG_PAX_MEMORY_UDEREF
12579+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12580+ src += PAX_USER_SHADOW_BASE;
12581+#endif
12582+
12583+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12584 }
12585 }
12586
12587 static __always_inline __must_check
12588-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12589+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
12590 {
12591- int ret = 0;
12592+ unsigned ret = 0;
12593
12594 might_fault();
12595- if (!__builtin_constant_p(size))
12596- return copy_user_generic((__force void *)dst, src, size);
12597+
12598+ pax_track_stack();
12599+
12600+ if ((int)size < 0)
12601+ return size;
12602+
12603+#ifdef CONFIG_PAX_MEMORY_UDEREF
12604+ if (!__access_ok(VERIFY_WRITE, dst, size))
12605+ return size;
12606+#endif
12607+
12608+ if (!__builtin_constant_p(size)) {
12609+ check_object_size(src, size, true);
12610+
12611+#ifdef CONFIG_PAX_MEMORY_UDEREF
12612+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12613+ dst += PAX_USER_SHADOW_BASE;
12614+#endif
12615+
12616+ return copy_user_generic((__force_kernel void *)dst, src, size);
12617+ }
12618 switch (size) {
12619- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12620+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12621 ret, "b", "b", "iq", 1);
12622 return ret;
12623- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12624+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12625 ret, "w", "w", "ir", 2);
12626 return ret;
12627- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12628+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12629 ret, "l", "k", "ir", 4);
12630 return ret;
12631- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12632+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12633 ret, "q", "", "er", 8);
12634 return ret;
12635 case 10:
12636- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12637+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12638 ret, "q", "", "er", 10);
12639 if (unlikely(ret))
12640 return ret;
12641 asm("":::"memory");
12642- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12643+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12644 ret, "w", "w", "ir", 2);
12645 return ret;
12646 case 16:
12647- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12648+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12649 ret, "q", "", "er", 16);
12650 if (unlikely(ret))
12651 return ret;
12652 asm("":::"memory");
12653- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12654+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12655 ret, "q", "", "er", 8);
12656 return ret;
12657 default:
12658- return copy_user_generic((__force void *)dst, src, size);
12659+
12660+#ifdef CONFIG_PAX_MEMORY_UDEREF
12661+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12662+ dst += PAX_USER_SHADOW_BASE;
12663+#endif
12664+
12665+ return copy_user_generic((__force_kernel void *)dst, src, size);
12666 }
12667 }
12668
12669 static __always_inline __must_check
12670-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12671+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
12672 {
12673- int ret = 0;
12674+ if (access_ok(VERIFY_WRITE, to, len))
12675+ len = __copy_to_user(to, from, len);
12676+ return len;
12677+}
12678+
12679+static __always_inline __must_check
12680+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
12681+{
12682+ if ((int)len < 0)
12683+ return len;
12684+
12685+ if (access_ok(VERIFY_READ, from, len))
12686+ len = __copy_from_user(to, from, len);
12687+ else if ((int)len > 0) {
12688+ if (!__builtin_constant_p(len))
12689+ check_object_size(to, len, false);
12690+ memset(to, 0, len);
12691+ }
12692+ return len;
12693+}
12694+
12695+static __always_inline __must_check
12696+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12697+{
12698+ unsigned ret = 0;
12699
12700 might_fault();
12701- if (!__builtin_constant_p(size))
12702- return copy_user_generic((__force void *)dst,
12703- (__force void *)src, size);
12704+
12705+ pax_track_stack();
12706+
12707+ if ((int)size < 0)
12708+ return size;
12709+
12710+#ifdef CONFIG_PAX_MEMORY_UDEREF
12711+ if (!__access_ok(VERIFY_READ, src, size))
12712+ return size;
12713+ if (!__access_ok(VERIFY_WRITE, dst, size))
12714+ return size;
12715+#endif
12716+
12717+ if (!__builtin_constant_p(size)) {
12718+
12719+#ifdef CONFIG_PAX_MEMORY_UDEREF
12720+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12721+ src += PAX_USER_SHADOW_BASE;
12722+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12723+ dst += PAX_USER_SHADOW_BASE;
12724+#endif
12725+
12726+ return copy_user_generic((__force_kernel void *)dst,
12727+ (__force_kernel const void *)src, size);
12728+ }
12729 switch (size) {
12730 case 1: {
12731 u8 tmp;
12732- __get_user_asm(tmp, (u8 __user *)src,
12733+ __get_user_asm(tmp, (const u8 __user *)src,
12734 ret, "b", "b", "=q", 1);
12735 if (likely(!ret))
12736 __put_user_asm(tmp, (u8 __user *)dst,
12737@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12738 }
12739 case 2: {
12740 u16 tmp;
12741- __get_user_asm(tmp, (u16 __user *)src,
12742+ __get_user_asm(tmp, (const u16 __user *)src,
12743 ret, "w", "w", "=r", 2);
12744 if (likely(!ret))
12745 __put_user_asm(tmp, (u16 __user *)dst,
12746@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12747
12748 case 4: {
12749 u32 tmp;
12750- __get_user_asm(tmp, (u32 __user *)src,
12751+ __get_user_asm(tmp, (const u32 __user *)src,
12752 ret, "l", "k", "=r", 4);
12753 if (likely(!ret))
12754 __put_user_asm(tmp, (u32 __user *)dst,
12755@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12756 }
12757 case 8: {
12758 u64 tmp;
12759- __get_user_asm(tmp, (u64 __user *)src,
12760+ __get_user_asm(tmp, (const u64 __user *)src,
12761 ret, "q", "", "=r", 8);
12762 if (likely(!ret))
12763 __put_user_asm(tmp, (u64 __user *)dst,
12764@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12765 return ret;
12766 }
12767 default:
12768- return copy_user_generic((__force void *)dst,
12769- (__force void *)src, size);
12770+
12771+#ifdef CONFIG_PAX_MEMORY_UDEREF
12772+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12773+ src += PAX_USER_SHADOW_BASE;
12774+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12775+ dst += PAX_USER_SHADOW_BASE;
12776+#endif
12777+
12778+ return copy_user_generic((__force_kernel void *)dst,
12779+ (__force_kernel const void *)src, size);
12780 }
12781 }
12782
12783@@ -176,33 +277,75 @@ __must_check long strlen_user(const char __user *str);
12784 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12785 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12786
12787-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12788- unsigned size);
12789+static __must_check __always_inline unsigned long
12790+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12791+{
12792+ pax_track_stack();
12793+
12794+ if ((int)size < 0)
12795+ return size;
12796+
12797+#ifdef CONFIG_PAX_MEMORY_UDEREF
12798+ if (!__access_ok(VERIFY_READ, src, size))
12799+ return size;
12800
12801-static __must_check __always_inline int
12802+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12803+ src += PAX_USER_SHADOW_BASE;
12804+#endif
12805+
12806+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12807+}
12808+
12809+static __must_check __always_inline unsigned long
12810 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12811 {
12812- return copy_user_generic((__force void *)dst, src, size);
12813+ if ((int)size < 0)
12814+ return size;
12815+
12816+#ifdef CONFIG_PAX_MEMORY_UDEREF
12817+ if (!__access_ok(VERIFY_WRITE, dst, size))
12818+ return size;
12819+
12820+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12821+ dst += PAX_USER_SHADOW_BASE;
12822+#endif
12823+
12824+ return copy_user_generic((__force_kernel void *)dst, src, size);
12825 }
12826
12827-extern long __copy_user_nocache(void *dst, const void __user *src,
12828+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12829 unsigned size, int zerorest);
12830
12831-static inline int
12832-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12833+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12834 {
12835 might_sleep();
12836+
12837+ if ((int)size < 0)
12838+ return size;
12839+
12840+#ifdef CONFIG_PAX_MEMORY_UDEREF
12841+ if (!__access_ok(VERIFY_READ, src, size))
12842+ return size;
12843+#endif
12844+
12845 return __copy_user_nocache(dst, src, size, 1);
12846 }
12847
12848-static inline int
12849-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12850+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12851 unsigned size)
12852 {
12853+ if ((int)size < 0)
12854+ return size;
12855+
12856+#ifdef CONFIG_PAX_MEMORY_UDEREF
12857+ if (!__access_ok(VERIFY_READ, src, size))
12858+ return size;
12859+#endif
12860+
12861 return __copy_user_nocache(dst, src, size, 0);
12862 }
12863
12864-unsigned long
12865-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12866+extern unsigned long
12867+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
12868
12869 #endif /* _ASM_X86_UACCESS_64_H */
12870diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12871index 9064052..786cfbc 100644
12872--- a/arch/x86/include/asm/vdso.h
12873+++ b/arch/x86/include/asm/vdso.h
12874@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12875 #define VDSO32_SYMBOL(base, name) \
12876 ({ \
12877 extern const char VDSO32_##name[]; \
12878- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12879+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12880 })
12881 #endif
12882
12883diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12884index 3d61e20..9507180 100644
12885--- a/arch/x86/include/asm/vgtod.h
12886+++ b/arch/x86/include/asm/vgtod.h
12887@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12888 int sysctl_enabled;
12889 struct timezone sys_tz;
12890 struct { /* extract of a clocksource struct */
12891+ char name[8];
12892 cycle_t (*vread)(void);
12893 cycle_t cycle_last;
12894 cycle_t mask;
12895diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12896index 61e08c0..b0da582 100644
12897--- a/arch/x86/include/asm/vmi.h
12898+++ b/arch/x86/include/asm/vmi.h
12899@@ -191,6 +191,7 @@ struct vrom_header {
12900 u8 reserved[96]; /* Reserved for headers */
12901 char vmi_init[8]; /* VMI_Init jump point */
12902 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12903+ char rom_data[8048]; /* rest of the option ROM */
12904 } __attribute__((packed));
12905
12906 struct pnp_header {
12907diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12908index c6e0bee..fcb9f74 100644
12909--- a/arch/x86/include/asm/vmi_time.h
12910+++ b/arch/x86/include/asm/vmi_time.h
12911@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12912 int (*wallclock_updated)(void);
12913 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12914 void (*cancel_alarm)(u32 flags);
12915-} vmi_timer_ops;
12916+} __no_const vmi_timer_ops;
12917
12918 /* Prototypes */
12919 extern void __init vmi_time_init(void);
12920diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12921index d0983d2..1f7c9e9 100644
12922--- a/arch/x86/include/asm/vsyscall.h
12923+++ b/arch/x86/include/asm/vsyscall.h
12924@@ -15,9 +15,10 @@ enum vsyscall_num {
12925
12926 #ifdef __KERNEL__
12927 #include <linux/seqlock.h>
12928+#include <linux/getcpu.h>
12929+#include <linux/time.h>
12930
12931 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12932-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12933
12934 /* Definitions for CONFIG_GENERIC_TIME definitions */
12935 #define __section_vsyscall_gtod_data __attribute__ \
12936@@ -31,7 +32,6 @@ enum vsyscall_num {
12937 #define VGETCPU_LSL 2
12938
12939 extern int __vgetcpu_mode;
12940-extern volatile unsigned long __jiffies;
12941
12942 /* kernel space (writeable) */
12943 extern int vgetcpu_mode;
12944@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12945
12946 extern void map_vsyscall(void);
12947
12948+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12949+extern time_t vtime(time_t *t);
12950+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12951 #endif /* __KERNEL__ */
12952
12953 #endif /* _ASM_X86_VSYSCALL_H */
12954diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12955index 2c756fd..3377e37 100644
12956--- a/arch/x86/include/asm/x86_init.h
12957+++ b/arch/x86/include/asm/x86_init.h
12958@@ -28,7 +28,7 @@ struct x86_init_mpparse {
12959 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12960 void (*find_smp_config)(unsigned int reserve);
12961 void (*get_smp_config)(unsigned int early);
12962-};
12963+} __no_const;
12964
12965 /**
12966 * struct x86_init_resources - platform specific resource related ops
12967@@ -42,7 +42,7 @@ struct x86_init_resources {
12968 void (*probe_roms)(void);
12969 void (*reserve_resources)(void);
12970 char *(*memory_setup)(void);
12971-};
12972+} __no_const;
12973
12974 /**
12975 * struct x86_init_irqs - platform specific interrupt setup
12976@@ -55,7 +55,7 @@ struct x86_init_irqs {
12977 void (*pre_vector_init)(void);
12978 void (*intr_init)(void);
12979 void (*trap_init)(void);
12980-};
12981+} __no_const;
12982
12983 /**
12984 * struct x86_init_oem - oem platform specific customizing functions
12985@@ -65,7 +65,7 @@ struct x86_init_irqs {
12986 struct x86_init_oem {
12987 void (*arch_setup)(void);
12988 void (*banner)(void);
12989-};
12990+} __no_const;
12991
12992 /**
12993 * struct x86_init_paging - platform specific paging functions
12994@@ -75,7 +75,7 @@ struct x86_init_oem {
12995 struct x86_init_paging {
12996 void (*pagetable_setup_start)(pgd_t *base);
12997 void (*pagetable_setup_done)(pgd_t *base);
12998-};
12999+} __no_const;
13000
13001 /**
13002 * struct x86_init_timers - platform specific timer setup
13003@@ -88,7 +88,7 @@ struct x86_init_timers {
13004 void (*setup_percpu_clockev)(void);
13005 void (*tsc_pre_init)(void);
13006 void (*timer_init)(void);
13007-};
13008+} __no_const;
13009
13010 /**
13011 * struct x86_init_ops - functions for platform specific setup
13012@@ -101,7 +101,7 @@ struct x86_init_ops {
13013 struct x86_init_oem oem;
13014 struct x86_init_paging paging;
13015 struct x86_init_timers timers;
13016-};
13017+} __no_const;
13018
13019 /**
13020 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13021@@ -109,7 +109,7 @@ struct x86_init_ops {
13022 */
13023 struct x86_cpuinit_ops {
13024 void (*setup_percpu_clockev)(void);
13025-};
13026+} __no_const;
13027
13028 /**
13029 * struct x86_platform_ops - platform specific runtime functions
13030@@ -121,7 +121,7 @@ struct x86_platform_ops {
13031 unsigned long (*calibrate_tsc)(void);
13032 unsigned long (*get_wallclock)(void);
13033 int (*set_wallclock)(unsigned long nowtime);
13034-};
13035+} __no_const;
13036
13037 extern struct x86_init_ops x86_init;
13038 extern struct x86_cpuinit_ops x86_cpuinit;
13039diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13040index 727acc1..554f3eb 100644
13041--- a/arch/x86/include/asm/xsave.h
13042+++ b/arch/x86/include/asm/xsave.h
13043@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13044 static inline int xsave_user(struct xsave_struct __user *buf)
13045 {
13046 int err;
13047+
13048+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13049+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13050+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13051+#endif
13052+
13053 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13054 "2:\n"
13055 ".section .fixup,\"ax\"\n"
13056@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13057 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13058 {
13059 int err;
13060- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13061+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13062 u32 lmask = mask;
13063 u32 hmask = mask >> 32;
13064
13065+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13066+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13067+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13068+#endif
13069+
13070 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13071 "2:\n"
13072 ".section .fixup,\"ax\"\n"
13073diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13074index 6a564ac..9b1340c 100644
13075--- a/arch/x86/kernel/acpi/realmode/Makefile
13076+++ b/arch/x86/kernel/acpi/realmode/Makefile
13077@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13078 $(call cc-option, -fno-stack-protector) \
13079 $(call cc-option, -mpreferred-stack-boundary=2)
13080 KBUILD_CFLAGS += $(call cc-option, -m32)
13081+ifdef CONSTIFY_PLUGIN
13082+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13083+endif
13084 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13085 GCOV_PROFILE := n
13086
13087diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13088index 580b4e2..d4129e4 100644
13089--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13090+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13091@@ -91,6 +91,9 @@ _start:
13092 /* Do any other stuff... */
13093
13094 #ifndef CONFIG_64BIT
13095+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
13096+ call verify_cpu
13097+
13098 /* This could also be done in C code... */
13099 movl pmode_cr3, %eax
13100 movl %eax, %cr3
13101@@ -104,7 +107,7 @@ _start:
13102 movl %eax, %ecx
13103 orl %edx, %ecx
13104 jz 1f
13105- movl $0xc0000080, %ecx
13106+ mov $MSR_EFER, %ecx
13107 wrmsr
13108 1:
13109
13110@@ -114,6 +117,7 @@ _start:
13111 movl pmode_cr0, %eax
13112 movl %eax, %cr0
13113 jmp pmode_return
13114+# include "../../verify_cpu.S"
13115 #else
13116 pushw $0
13117 pushw trampoline_segment
13118diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13119index ca93638..7042f24 100644
13120--- a/arch/x86/kernel/acpi/sleep.c
13121+++ b/arch/x86/kernel/acpi/sleep.c
13122@@ -11,11 +11,12 @@
13123 #include <linux/cpumask.h>
13124 #include <asm/segment.h>
13125 #include <asm/desc.h>
13126+#include <asm/e820.h>
13127
13128 #include "realmode/wakeup.h"
13129 #include "sleep.h"
13130
13131-unsigned long acpi_wakeup_address;
13132+unsigned long acpi_wakeup_address = 0x2000;
13133 unsigned long acpi_realmode_flags;
13134
13135 /* address in low memory of the wakeup routine. */
13136@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13137 #else /* CONFIG_64BIT */
13138 header->trampoline_segment = setup_trampoline() >> 4;
13139 #ifdef CONFIG_SMP
13140- stack_start.sp = temp_stack + sizeof(temp_stack);
13141+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13142+
13143+ pax_open_kernel();
13144 early_gdt_descr.address =
13145 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13146+ pax_close_kernel();
13147+
13148 initial_gs = per_cpu_offset(smp_processor_id());
13149 #endif
13150 initial_code = (unsigned long)wakeup_long64;
13151@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13152 return;
13153 }
13154
13155- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13156-
13157- if (!acpi_realmode) {
13158- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13159- return;
13160- }
13161-
13162- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13163+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13164+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13165 }
13166
13167
13168diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13169index 8ded418..079961e 100644
13170--- a/arch/x86/kernel/acpi/wakeup_32.S
13171+++ b/arch/x86/kernel/acpi/wakeup_32.S
13172@@ -30,13 +30,11 @@ wakeup_pmode_return:
13173 # and restore the stack ... but you need gdt for this to work
13174 movl saved_context_esp, %esp
13175
13176- movl %cs:saved_magic, %eax
13177- cmpl $0x12345678, %eax
13178+ cmpl $0x12345678, saved_magic
13179 jne bogus_magic
13180
13181 # jump to place where we left off
13182- movl saved_eip, %eax
13183- jmp *%eax
13184+ jmp *(saved_eip)
13185
13186 bogus_magic:
13187 jmp bogus_magic
13188diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13189index de7353c..075da5f 100644
13190--- a/arch/x86/kernel/alternative.c
13191+++ b/arch/x86/kernel/alternative.c
13192@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13193
13194 BUG_ON(p->len > MAX_PATCH_LEN);
13195 /* prep the buffer with the original instructions */
13196- memcpy(insnbuf, p->instr, p->len);
13197+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13198 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13199 (unsigned long)p->instr, p->len);
13200
13201@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13202 if (smp_alt_once)
13203 free_init_pages("SMP alternatives",
13204 (unsigned long)__smp_locks,
13205- (unsigned long)__smp_locks_end);
13206+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13207
13208 restart_nmi();
13209 }
13210@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13211 * instructions. And on the local CPU you need to be protected again NMI or MCE
13212 * handlers seeing an inconsistent instruction while you patch.
13213 */
13214-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13215+static void *__kprobes text_poke_early(void *addr, const void *opcode,
13216 size_t len)
13217 {
13218 unsigned long flags;
13219 local_irq_save(flags);
13220- memcpy(addr, opcode, len);
13221+
13222+ pax_open_kernel();
13223+ memcpy(ktla_ktva(addr), opcode, len);
13224 sync_core();
13225+ pax_close_kernel();
13226+
13227 local_irq_restore(flags);
13228 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13229 that causes hangs on some VIA CPUs. */
13230@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13231 */
13232 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13233 {
13234- unsigned long flags;
13235- char *vaddr;
13236+ unsigned char *vaddr = ktla_ktva(addr);
13237 struct page *pages[2];
13238- int i;
13239+ size_t i;
13240
13241 if (!core_kernel_text((unsigned long)addr)) {
13242- pages[0] = vmalloc_to_page(addr);
13243- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13244+ pages[0] = vmalloc_to_page(vaddr);
13245+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13246 } else {
13247- pages[0] = virt_to_page(addr);
13248+ pages[0] = virt_to_page(vaddr);
13249 WARN_ON(!PageReserved(pages[0]));
13250- pages[1] = virt_to_page(addr + PAGE_SIZE);
13251+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13252 }
13253 BUG_ON(!pages[0]);
13254- local_irq_save(flags);
13255- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13256- if (pages[1])
13257- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13258- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13259- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13260- clear_fixmap(FIX_TEXT_POKE0);
13261- if (pages[1])
13262- clear_fixmap(FIX_TEXT_POKE1);
13263- local_flush_tlb();
13264- sync_core();
13265- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13266- that causes hangs on some VIA CPUs. */
13267+ text_poke_early(addr, opcode, len);
13268 for (i = 0; i < len; i++)
13269- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13270- local_irq_restore(flags);
13271+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13272 return addr;
13273 }
13274diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13275index 3a44b75..1601800 100644
13276--- a/arch/x86/kernel/amd_iommu.c
13277+++ b/arch/x86/kernel/amd_iommu.c
13278@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13279 }
13280 }
13281
13282-static struct dma_map_ops amd_iommu_dma_ops = {
13283+static const struct dma_map_ops amd_iommu_dma_ops = {
13284 .alloc_coherent = alloc_coherent,
13285 .free_coherent = free_coherent,
13286 .map_page = map_page,
13287diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13288index 1d2d670..8e3f477 100644
13289--- a/arch/x86/kernel/apic/apic.c
13290+++ b/arch/x86/kernel/apic/apic.c
13291@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13292 /*
13293 * Debug level, exported for io_apic.c
13294 */
13295-unsigned int apic_verbosity;
13296+int apic_verbosity;
13297
13298 int pic_mode;
13299
13300@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13301 apic_write(APIC_ESR, 0);
13302 v1 = apic_read(APIC_ESR);
13303 ack_APIC_irq();
13304- atomic_inc(&irq_err_count);
13305+ atomic_inc_unchecked(&irq_err_count);
13306
13307 /*
13308 * Here is what the APIC error bits mean:
13309@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13310 u16 *bios_cpu_apicid;
13311 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13312
13313+ pax_track_stack();
13314+
13315 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13316 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13317
13318diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13319index 8928d97..f799cea 100644
13320--- a/arch/x86/kernel/apic/io_apic.c
13321+++ b/arch/x86/kernel/apic/io_apic.c
13322@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13323 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13324 GFP_ATOMIC);
13325 if (!ioapic_entries)
13326- return 0;
13327+ return NULL;
13328
13329 for (apic = 0; apic < nr_ioapics; apic++) {
13330 ioapic_entries[apic] =
13331@@ -733,7 +733,7 @@ nomem:
13332 kfree(ioapic_entries[apic]);
13333 kfree(ioapic_entries);
13334
13335- return 0;
13336+ return NULL;
13337 }
13338
13339 /*
13340@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13341 }
13342 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13343
13344-void lock_vector_lock(void)
13345+void lock_vector_lock(void) __acquires(vector_lock)
13346 {
13347 /* Used to the online set of cpus does not change
13348 * during assign_irq_vector.
13349@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13350 spin_lock(&vector_lock);
13351 }
13352
13353-void unlock_vector_lock(void)
13354+void unlock_vector_lock(void) __releases(vector_lock)
13355 {
13356 spin_unlock(&vector_lock);
13357 }
13358@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13359 ack_APIC_irq();
13360 }
13361
13362-atomic_t irq_mis_count;
13363+atomic_unchecked_t irq_mis_count;
13364
13365 static void ack_apic_level(unsigned int irq)
13366 {
13367@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13368
13369 /* Tail end of version 0x11 I/O APIC bug workaround */
13370 if (!(v & (1 << (i & 0x1f)))) {
13371- atomic_inc(&irq_mis_count);
13372+ atomic_inc_unchecked(&irq_mis_count);
13373 spin_lock(&ioapic_lock);
13374 __mask_and_edge_IO_APIC_irq(cfg);
13375 __unmask_and_level_IO_APIC_irq(cfg);
13376diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13377index 151ace6..f317474 100644
13378--- a/arch/x86/kernel/apm_32.c
13379+++ b/arch/x86/kernel/apm_32.c
13380@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13381 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13382 * even though they are called in protected mode.
13383 */
13384-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13385+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13386 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13387
13388 static const char driver_version[] = "1.16ac"; /* no spaces */
13389@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13390 BUG_ON(cpu != 0);
13391 gdt = get_cpu_gdt_table(cpu);
13392 save_desc_40 = gdt[0x40 / 8];
13393+
13394+ pax_open_kernel();
13395 gdt[0x40 / 8] = bad_bios_desc;
13396+ pax_close_kernel();
13397
13398 apm_irq_save(flags);
13399 APM_DO_SAVE_SEGS;
13400@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13401 &call->esi);
13402 APM_DO_RESTORE_SEGS;
13403 apm_irq_restore(flags);
13404+
13405+ pax_open_kernel();
13406 gdt[0x40 / 8] = save_desc_40;
13407+ pax_close_kernel();
13408+
13409 put_cpu();
13410
13411 return call->eax & 0xff;
13412@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13413 BUG_ON(cpu != 0);
13414 gdt = get_cpu_gdt_table(cpu);
13415 save_desc_40 = gdt[0x40 / 8];
13416+
13417+ pax_open_kernel();
13418 gdt[0x40 / 8] = bad_bios_desc;
13419+ pax_close_kernel();
13420
13421 apm_irq_save(flags);
13422 APM_DO_SAVE_SEGS;
13423@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13424 &call->eax);
13425 APM_DO_RESTORE_SEGS;
13426 apm_irq_restore(flags);
13427+
13428+ pax_open_kernel();
13429 gdt[0x40 / 8] = save_desc_40;
13430+ pax_close_kernel();
13431+
13432 put_cpu();
13433 return error;
13434 }
13435@@ -975,7 +989,7 @@ recalc:
13436
13437 static void apm_power_off(void)
13438 {
13439- unsigned char po_bios_call[] = {
13440+ const unsigned char po_bios_call[] = {
13441 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13442 0x8e, 0xd0, /* movw ax,ss */
13443 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13444@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13445 * code to that CPU.
13446 */
13447 gdt = get_cpu_gdt_table(0);
13448+
13449+ pax_open_kernel();
13450 set_desc_base(&gdt[APM_CS >> 3],
13451 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13452 set_desc_base(&gdt[APM_CS_16 >> 3],
13453 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13454 set_desc_base(&gdt[APM_DS >> 3],
13455 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13456+ pax_close_kernel();
13457
13458 proc_create("apm", 0, NULL, &apm_file_ops);
13459
13460diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13461index dfdbf64..9b2b6ce 100644
13462--- a/arch/x86/kernel/asm-offsets_32.c
13463+++ b/arch/x86/kernel/asm-offsets_32.c
13464@@ -51,7 +51,6 @@ void foo(void)
13465 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13466 BLANK();
13467
13468- OFFSET(TI_task, thread_info, task);
13469 OFFSET(TI_exec_domain, thread_info, exec_domain);
13470 OFFSET(TI_flags, thread_info, flags);
13471 OFFSET(TI_status, thread_info, status);
13472@@ -60,6 +59,8 @@ void foo(void)
13473 OFFSET(TI_restart_block, thread_info, restart_block);
13474 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13475 OFFSET(TI_cpu, thread_info, cpu);
13476+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13477+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13478 BLANK();
13479
13480 OFFSET(GDS_size, desc_ptr, size);
13481@@ -99,6 +100,7 @@ void foo(void)
13482
13483 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13484 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13485+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13486 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13487 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13488 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13489@@ -115,6 +117,11 @@ void foo(void)
13490 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13491 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13492 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13493+
13494+#ifdef CONFIG_PAX_KERNEXEC
13495+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13496+#endif
13497+
13498 #endif
13499
13500 #ifdef CONFIG_XEN
13501diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13502index 4a6aeed..371de20 100644
13503--- a/arch/x86/kernel/asm-offsets_64.c
13504+++ b/arch/x86/kernel/asm-offsets_64.c
13505@@ -44,6 +44,8 @@ int main(void)
13506 ENTRY(addr_limit);
13507 ENTRY(preempt_count);
13508 ENTRY(status);
13509+ ENTRY(lowest_stack);
13510+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13511 #ifdef CONFIG_IA32_EMULATION
13512 ENTRY(sysenter_return);
13513 #endif
13514@@ -63,6 +65,18 @@ int main(void)
13515 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13516 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13517 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13518+
13519+#ifdef CONFIG_PAX_KERNEXEC
13520+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13521+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13522+#endif
13523+
13524+#ifdef CONFIG_PAX_MEMORY_UDEREF
13525+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13526+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13527+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13528+#endif
13529+
13530 #endif
13531
13532
13533@@ -115,6 +129,7 @@ int main(void)
13534 ENTRY(cr8);
13535 BLANK();
13536 #undef ENTRY
13537+ DEFINE(TSS_size, sizeof(struct tss_struct));
13538 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13539 BLANK();
13540 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13541@@ -130,6 +145,7 @@ int main(void)
13542
13543 BLANK();
13544 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13545+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13546 #ifdef CONFIG_XEN
13547 BLANK();
13548 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13549diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13550index ff502cc..dc5133e 100644
13551--- a/arch/x86/kernel/cpu/Makefile
13552+++ b/arch/x86/kernel/cpu/Makefile
13553@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13554 CFLAGS_REMOVE_common.o = -pg
13555 endif
13556
13557-# Make sure load_percpu_segment has no stackprotector
13558-nostackp := $(call cc-option, -fno-stack-protector)
13559-CFLAGS_common.o := $(nostackp)
13560-
13561 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13562 obj-y += proc.o capflags.o powerflags.o common.o
13563 obj-y += vmware.o hypervisor.o sched.o
13564diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13565index 6e082dc..a0b5f36 100644
13566--- a/arch/x86/kernel/cpu/amd.c
13567+++ b/arch/x86/kernel/cpu/amd.c
13568@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13569 unsigned int size)
13570 {
13571 /* AMD errata T13 (order #21922) */
13572- if ((c->x86 == 6)) {
13573+ if (c->x86 == 6) {
13574 /* Duron Rev A0 */
13575 if (c->x86_model == 3 && c->x86_mask == 0)
13576 size = 64;
13577diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13578index 4e34d10..a53b130a 100644
13579--- a/arch/x86/kernel/cpu/common.c
13580+++ b/arch/x86/kernel/cpu/common.c
13581@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13582
13583 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13584
13585-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13586-#ifdef CONFIG_X86_64
13587- /*
13588- * We need valid kernel segments for data and code in long mode too
13589- * IRET will check the segment types kkeil 2000/10/28
13590- * Also sysret mandates a special GDT layout
13591- *
13592- * TLS descriptors are currently at a different place compared to i386.
13593- * Hopefully nobody expects them at a fixed place (Wine?)
13594- */
13595- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13596- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13597- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13598- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13599- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13600- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13601-#else
13602- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13603- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13604- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13605- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13606- /*
13607- * Segments used for calling PnP BIOS have byte granularity.
13608- * They code segments and data segments have fixed 64k limits,
13609- * the transfer segment sizes are set at run time.
13610- */
13611- /* 32-bit code */
13612- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13613- /* 16-bit code */
13614- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13615- /* 16-bit data */
13616- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13617- /* 16-bit data */
13618- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13619- /* 16-bit data */
13620- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13621- /*
13622- * The APM segments have byte granularity and their bases
13623- * are set at run time. All have 64k limits.
13624- */
13625- /* 32-bit code */
13626- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13627- /* 16-bit code */
13628- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13629- /* data */
13630- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13631-
13632- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13633- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13634- GDT_STACK_CANARY_INIT
13635-#endif
13636-} };
13637-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13638-
13639 static int __init x86_xsave_setup(char *s)
13640 {
13641 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13642@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13643 {
13644 struct desc_ptr gdt_descr;
13645
13646- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13647+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13648 gdt_descr.size = GDT_SIZE - 1;
13649 load_gdt(&gdt_descr);
13650 /* Reload the per-cpu base */
13651@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13652 /* Filter out anything that depends on CPUID levels we don't have */
13653 filter_cpuid_features(c, true);
13654
13655+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
13656+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13657+#endif
13658+
13659 /* If the model name is still unset, do table lookup. */
13660 if (!c->x86_model_id[0]) {
13661 const char *p;
13662@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13663 }
13664 __setup("clearcpuid=", setup_disablecpuid);
13665
13666+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13667+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13668+
13669 #ifdef CONFIG_X86_64
13670 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13671
13672@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13673 EXPORT_PER_CPU_SYMBOL(current_task);
13674
13675 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13676- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13677+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13678 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13679
13680 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13681@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13682 {
13683 memset(regs, 0, sizeof(struct pt_regs));
13684 regs->fs = __KERNEL_PERCPU;
13685- regs->gs = __KERNEL_STACK_CANARY;
13686+ savesegment(gs, regs->gs);
13687
13688 return regs;
13689 }
13690@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13691 int i;
13692
13693 cpu = stack_smp_processor_id();
13694- t = &per_cpu(init_tss, cpu);
13695+ t = init_tss + cpu;
13696 orig_ist = &per_cpu(orig_ist, cpu);
13697
13698 #ifdef CONFIG_NUMA
13699@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13700 switch_to_new_gdt(cpu);
13701 loadsegment(fs, 0);
13702
13703- load_idt((const struct desc_ptr *)&idt_descr);
13704+ load_idt(&idt_descr);
13705
13706 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13707 syscall_init();
13708@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13709 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13710 barrier();
13711
13712- check_efer();
13713 if (cpu != 0)
13714 enable_x2apic();
13715
13716@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13717 {
13718 int cpu = smp_processor_id();
13719 struct task_struct *curr = current;
13720- struct tss_struct *t = &per_cpu(init_tss, cpu);
13721+ struct tss_struct *t = init_tss + cpu;
13722 struct thread_struct *thread = &curr->thread;
13723
13724 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13725diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13726index 6a77cca..4f4fca0 100644
13727--- a/arch/x86/kernel/cpu/intel.c
13728+++ b/arch/x86/kernel/cpu/intel.c
13729@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13730 * Update the IDT descriptor and reload the IDT so that
13731 * it uses the read-only mapped virtual address.
13732 */
13733- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13734+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13735 load_idt(&idt_descr);
13736 }
13737 #endif
13738diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13739index 417990f..96dc36b 100644
13740--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13741+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13742@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13743 return ret;
13744 }
13745
13746-static struct sysfs_ops sysfs_ops = {
13747+static const struct sysfs_ops sysfs_ops = {
13748 .show = show,
13749 .store = store,
13750 };
13751diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13752index 472763d..9831e11 100644
13753--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13754+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13755@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13756 static int inject_init(void)
13757 {
13758 printk(KERN_INFO "Machine check injector initialized\n");
13759- mce_chrdev_ops.write = mce_write;
13760+ pax_open_kernel();
13761+ *(void **)&mce_chrdev_ops.write = mce_write;
13762+ pax_close_kernel();
13763 register_die_notifier(&mce_raise_nb);
13764 return 0;
13765 }
13766diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13767index 0f16a2b..21740f5 100644
13768--- a/arch/x86/kernel/cpu/mcheck/mce.c
13769+++ b/arch/x86/kernel/cpu/mcheck/mce.c
13770@@ -43,6 +43,7 @@
13771 #include <asm/ipi.h>
13772 #include <asm/mce.h>
13773 #include <asm/msr.h>
13774+#include <asm/local.h>
13775
13776 #include "mce-internal.h"
13777
13778@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13779 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13780 m->cs, m->ip);
13781
13782- if (m->cs == __KERNEL_CS)
13783+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13784 print_symbol("{%s}", m->ip);
13785 pr_cont("\n");
13786 }
13787@@ -221,10 +222,10 @@ static void print_mce_tail(void)
13788
13789 #define PANIC_TIMEOUT 5 /* 5 seconds */
13790
13791-static atomic_t mce_paniced;
13792+static atomic_unchecked_t mce_paniced;
13793
13794 static int fake_panic;
13795-static atomic_t mce_fake_paniced;
13796+static atomic_unchecked_t mce_fake_paniced;
13797
13798 /* Panic in progress. Enable interrupts and wait for final IPI */
13799 static void wait_for_panic(void)
13800@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13801 /*
13802 * Make sure only one CPU runs in machine check panic
13803 */
13804- if (atomic_inc_return(&mce_paniced) > 1)
13805+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13806 wait_for_panic();
13807 barrier();
13808
13809@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13810 console_verbose();
13811 } else {
13812 /* Don't log too much for fake panic */
13813- if (atomic_inc_return(&mce_fake_paniced) > 1)
13814+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13815 return;
13816 }
13817 print_mce_head();
13818@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13819 * might have been modified by someone else.
13820 */
13821 rmb();
13822- if (atomic_read(&mce_paniced))
13823+ if (atomic_read_unchecked(&mce_paniced))
13824 wait_for_panic();
13825 if (!monarch_timeout)
13826 goto out;
13827@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13828 }
13829
13830 /* Call the installed machine check handler for this CPU setup. */
13831-void (*machine_check_vector)(struct pt_regs *, long error_code) =
13832+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13833 unexpected_machine_check;
13834
13835 /*
13836@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13837 return;
13838 }
13839
13840+ pax_open_kernel();
13841 machine_check_vector = do_machine_check;
13842+ pax_close_kernel();
13843
13844 mce_init();
13845 mce_cpu_features(c);
13846@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13847 */
13848
13849 static DEFINE_SPINLOCK(mce_state_lock);
13850-static int open_count; /* #times opened */
13851+static local_t open_count; /* #times opened */
13852 static int open_exclu; /* already open exclusive? */
13853
13854 static int mce_open(struct inode *inode, struct file *file)
13855 {
13856 spin_lock(&mce_state_lock);
13857
13858- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13859+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13860 spin_unlock(&mce_state_lock);
13861
13862 return -EBUSY;
13863@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13864
13865 if (file->f_flags & O_EXCL)
13866 open_exclu = 1;
13867- open_count++;
13868+ local_inc(&open_count);
13869
13870 spin_unlock(&mce_state_lock);
13871
13872@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13873 {
13874 spin_lock(&mce_state_lock);
13875
13876- open_count--;
13877+ local_dec(&open_count);
13878 open_exclu = 0;
13879
13880 spin_unlock(&mce_state_lock);
13881@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13882 static void mce_reset(void)
13883 {
13884 cpu_missing = 0;
13885- atomic_set(&mce_fake_paniced, 0);
13886+ atomic_set_unchecked(&mce_fake_paniced, 0);
13887 atomic_set(&mce_executing, 0);
13888 atomic_set(&mce_callin, 0);
13889 atomic_set(&global_nwo, 0);
13890diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13891index ef3cd31..9d2f6ab 100644
13892--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13893+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13894@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13895 return ret;
13896 }
13897
13898-static struct sysfs_ops threshold_ops = {
13899+static const struct sysfs_ops threshold_ops = {
13900 .show = show,
13901 .store = store,
13902 };
13903diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13904index 5c0e653..1e82c7c 100644
13905--- a/arch/x86/kernel/cpu/mcheck/p5.c
13906+++ b/arch/x86/kernel/cpu/mcheck/p5.c
13907@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13908 if (!cpu_has(c, X86_FEATURE_MCE))
13909 return;
13910
13911+ pax_open_kernel();
13912 machine_check_vector = pentium_machine_check;
13913+ pax_close_kernel();
13914 /* Make sure the vector pointer is visible before we enable MCEs: */
13915 wmb();
13916
13917diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13918index 54060f5..e6ba93d 100644
13919--- a/arch/x86/kernel/cpu/mcheck/winchip.c
13920+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13921@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13922 {
13923 u32 lo, hi;
13924
13925+ pax_open_kernel();
13926 machine_check_vector = winchip_machine_check;
13927+ pax_close_kernel();
13928 /* Make sure the vector pointer is visible before we enable MCEs: */
13929 wmb();
13930
13931diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13932index 33af141..92ba9cd 100644
13933--- a/arch/x86/kernel/cpu/mtrr/amd.c
13934+++ b/arch/x86/kernel/cpu/mtrr/amd.c
13935@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
13936 return 0;
13937 }
13938
13939-static struct mtrr_ops amd_mtrr_ops = {
13940+static const struct mtrr_ops amd_mtrr_ops = {
13941 .vendor = X86_VENDOR_AMD,
13942 .set = amd_set_mtrr,
13943 .get = amd_get_mtrr,
13944diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
13945index de89f14..316fe3e 100644
13946--- a/arch/x86/kernel/cpu/mtrr/centaur.c
13947+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
13948@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
13949 return 0;
13950 }
13951
13952-static struct mtrr_ops centaur_mtrr_ops = {
13953+static const struct mtrr_ops centaur_mtrr_ops = {
13954 .vendor = X86_VENDOR_CENTAUR,
13955 .set = centaur_set_mcr,
13956 .get = centaur_get_mcr,
13957diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
13958index 228d982..68a3343 100644
13959--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
13960+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
13961@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
13962 post_set();
13963 }
13964
13965-static struct mtrr_ops cyrix_mtrr_ops = {
13966+static const struct mtrr_ops cyrix_mtrr_ops = {
13967 .vendor = X86_VENDOR_CYRIX,
13968 .set_all = cyrix_set_all,
13969 .set = cyrix_set_arr,
13970diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
13971index 55da0c5..4d75584 100644
13972--- a/arch/x86/kernel/cpu/mtrr/generic.c
13973+++ b/arch/x86/kernel/cpu/mtrr/generic.c
13974@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
13975 /*
13976 * Generic structure...
13977 */
13978-struct mtrr_ops generic_mtrr_ops = {
13979+const struct mtrr_ops generic_mtrr_ops = {
13980 .use_intel_if = 1,
13981 .set_all = generic_set_all,
13982 .get = generic_get_mtrr,
13983diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13984index fd60f09..c94ef52 100644
13985--- a/arch/x86/kernel/cpu/mtrr/main.c
13986+++ b/arch/x86/kernel/cpu/mtrr/main.c
13987@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
13988 u64 size_or_mask, size_and_mask;
13989 static bool mtrr_aps_delayed_init;
13990
13991-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13992+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13993
13994-struct mtrr_ops *mtrr_if;
13995+const struct mtrr_ops *mtrr_if;
13996
13997 static void set_mtrr(unsigned int reg, unsigned long base,
13998 unsigned long size, mtrr_type type);
13999
14000-void set_mtrr_ops(struct mtrr_ops *ops)
14001+void set_mtrr_ops(const struct mtrr_ops *ops)
14002 {
14003 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14004 mtrr_ops[ops->vendor] = ops;
14005diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14006index a501dee..816c719 100644
14007--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14008+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14009@@ -25,14 +25,14 @@ struct mtrr_ops {
14010 int (*validate_add_page)(unsigned long base, unsigned long size,
14011 unsigned int type);
14012 int (*have_wrcomb)(void);
14013-};
14014+} __do_const;
14015
14016 extern int generic_get_free_region(unsigned long base, unsigned long size,
14017 int replace_reg);
14018 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14019 unsigned int type);
14020
14021-extern struct mtrr_ops generic_mtrr_ops;
14022+extern const struct mtrr_ops generic_mtrr_ops;
14023
14024 extern int positive_have_wrcomb(void);
14025
14026@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14027 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14028 void get_mtrr_state(void);
14029
14030-extern void set_mtrr_ops(struct mtrr_ops *ops);
14031+extern void set_mtrr_ops(const struct mtrr_ops *ops);
14032
14033 extern u64 size_or_mask, size_and_mask;
14034-extern struct mtrr_ops *mtrr_if;
14035+extern const struct mtrr_ops *mtrr_if;
14036
14037 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14038 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14039diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14040index 0ff02ca..fc49a60 100644
14041--- a/arch/x86/kernel/cpu/perf_event.c
14042+++ b/arch/x86/kernel/cpu/perf_event.c
14043@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14044 * count to the generic event atomically:
14045 */
14046 again:
14047- prev_raw_count = atomic64_read(&hwc->prev_count);
14048+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14049 rdmsrl(hwc->event_base + idx, new_raw_count);
14050
14051- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14052+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14053 new_raw_count) != prev_raw_count)
14054 goto again;
14055
14056@@ -741,7 +741,7 @@ again:
14057 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14058 delta >>= shift;
14059
14060- atomic64_add(delta, &event->count);
14061+ atomic64_add_unchecked(delta, &event->count);
14062 atomic64_sub(delta, &hwc->period_left);
14063
14064 return new_raw_count;
14065@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14066 * The hw event starts counting from this event offset,
14067 * mark it to be able to extra future deltas:
14068 */
14069- atomic64_set(&hwc->prev_count, (u64)-left);
14070+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14071
14072 err = checking_wrmsrl(hwc->event_base + idx,
14073 (u64)(-left) & x86_pmu.event_mask);
14074@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14075 break;
14076
14077 callchain_store(entry, frame.return_address);
14078- fp = frame.next_frame;
14079+ fp = (__force const void __user *)frame.next_frame;
14080 }
14081 }
14082
14083diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14084index 898df97..9e82503 100644
14085--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14086+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14087@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14088
14089 /* Interface defining a CPU specific perfctr watchdog */
14090 struct wd_ops {
14091- int (*reserve)(void);
14092- void (*unreserve)(void);
14093- int (*setup)(unsigned nmi_hz);
14094- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14095- void (*stop)(void);
14096+ int (* const reserve)(void);
14097+ void (* const unreserve)(void);
14098+ int (* const setup)(unsigned nmi_hz);
14099+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14100+ void (* const stop)(void);
14101 unsigned perfctr;
14102 unsigned evntsel;
14103 u64 checkbit;
14104@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14105 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14106 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14107
14108+/* cannot be const */
14109 static struct wd_ops intel_arch_wd_ops;
14110
14111 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14112@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14113 return 1;
14114 }
14115
14116+/* cannot be const */
14117 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14118 .reserve = single_msr_reserve,
14119 .unreserve = single_msr_unreserve,
14120diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14121index ff95824..2ffdcb5 100644
14122--- a/arch/x86/kernel/crash.c
14123+++ b/arch/x86/kernel/crash.c
14124@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14125 regs = args->regs;
14126
14127 #ifdef CONFIG_X86_32
14128- if (!user_mode_vm(regs)) {
14129+ if (!user_mode(regs)) {
14130 crash_fixup_ss_esp(&fixed_regs, regs);
14131 regs = &fixed_regs;
14132 }
14133diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14134index 37250fe..bf2ec74 100644
14135--- a/arch/x86/kernel/doublefault_32.c
14136+++ b/arch/x86/kernel/doublefault_32.c
14137@@ -11,7 +11,7 @@
14138
14139 #define DOUBLEFAULT_STACKSIZE (1024)
14140 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14141-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14142+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14143
14144 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14145
14146@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14147 unsigned long gdt, tss;
14148
14149 store_gdt(&gdt_desc);
14150- gdt = gdt_desc.address;
14151+ gdt = (unsigned long)gdt_desc.address;
14152
14153 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14154
14155@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14156 /* 0x2 bit is always set */
14157 .flags = X86_EFLAGS_SF | 0x2,
14158 .sp = STACK_START,
14159- .es = __USER_DS,
14160+ .es = __KERNEL_DS,
14161 .cs = __KERNEL_CS,
14162 .ss = __KERNEL_DS,
14163- .ds = __USER_DS,
14164+ .ds = __KERNEL_DS,
14165 .fs = __KERNEL_PERCPU,
14166
14167 .__cr3 = __pa_nodebug(swapper_pg_dir),
14168diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14169index 2d8a371..4fa6ae6 100644
14170--- a/arch/x86/kernel/dumpstack.c
14171+++ b/arch/x86/kernel/dumpstack.c
14172@@ -2,6 +2,9 @@
14173 * Copyright (C) 1991, 1992 Linus Torvalds
14174 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14175 */
14176+#ifdef CONFIG_GRKERNSEC_HIDESYM
14177+#define __INCLUDED_BY_HIDESYM 1
14178+#endif
14179 #include <linux/kallsyms.h>
14180 #include <linux/kprobes.h>
14181 #include <linux/uaccess.h>
14182@@ -28,7 +31,7 @@ static int die_counter;
14183
14184 void printk_address(unsigned long address, int reliable)
14185 {
14186- printk(" [<%p>] %s%pS\n", (void *) address,
14187+ printk(" [<%p>] %s%pA\n", (void *) address,
14188 reliable ? "" : "? ", (void *) address);
14189 }
14190
14191@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14192 static void
14193 print_ftrace_graph_addr(unsigned long addr, void *data,
14194 const struct stacktrace_ops *ops,
14195- struct thread_info *tinfo, int *graph)
14196+ struct task_struct *task, int *graph)
14197 {
14198- struct task_struct *task = tinfo->task;
14199 unsigned long ret_addr;
14200 int index = task->curr_ret_stack;
14201
14202@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14203 static inline void
14204 print_ftrace_graph_addr(unsigned long addr, void *data,
14205 const struct stacktrace_ops *ops,
14206- struct thread_info *tinfo, int *graph)
14207+ struct task_struct *task, int *graph)
14208 { }
14209 #endif
14210
14211@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14212 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14213 */
14214
14215-static inline int valid_stack_ptr(struct thread_info *tinfo,
14216- void *p, unsigned int size, void *end)
14217+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14218 {
14219- void *t = tinfo;
14220 if (end) {
14221 if (p < end && p >= (end-THREAD_SIZE))
14222 return 1;
14223@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14224 }
14225
14226 unsigned long
14227-print_context_stack(struct thread_info *tinfo,
14228+print_context_stack(struct task_struct *task, void *stack_start,
14229 unsigned long *stack, unsigned long bp,
14230 const struct stacktrace_ops *ops, void *data,
14231 unsigned long *end, int *graph)
14232 {
14233 struct stack_frame *frame = (struct stack_frame *)bp;
14234
14235- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14236+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14237 unsigned long addr;
14238
14239 addr = *stack;
14240@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14241 } else {
14242 ops->address(data, addr, 0);
14243 }
14244- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14245+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14246 }
14247 stack++;
14248 }
14249@@ -180,7 +180,7 @@ void dump_stack(void)
14250 #endif
14251
14252 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14253- current->pid, current->comm, print_tainted(),
14254+ task_pid_nr(current), current->comm, print_tainted(),
14255 init_utsname()->release,
14256 (int)strcspn(init_utsname()->version, " "),
14257 init_utsname()->version);
14258@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14259 return flags;
14260 }
14261
14262+extern void gr_handle_kernel_exploit(void);
14263+
14264 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14265 {
14266 if (regs && kexec_should_crash(current))
14267@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14268 panic("Fatal exception in interrupt");
14269 if (panic_on_oops)
14270 panic("Fatal exception");
14271- do_exit(signr);
14272+
14273+ gr_handle_kernel_exploit();
14274+
14275+ do_group_exit(signr);
14276 }
14277
14278 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14279@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14280 unsigned long flags = oops_begin();
14281 int sig = SIGSEGV;
14282
14283- if (!user_mode_vm(regs))
14284+ if (!user_mode(regs))
14285 report_bug(regs->ip, regs);
14286
14287 if (__die(str, regs, err))
14288diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14289index 81086c2..13e8b17 100644
14290--- a/arch/x86/kernel/dumpstack.h
14291+++ b/arch/x86/kernel/dumpstack.h
14292@@ -15,7 +15,7 @@
14293 #endif
14294
14295 extern unsigned long
14296-print_context_stack(struct thread_info *tinfo,
14297+print_context_stack(struct task_struct *task, void *stack_start,
14298 unsigned long *stack, unsigned long bp,
14299 const struct stacktrace_ops *ops, void *data,
14300 unsigned long *end, int *graph);
14301diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14302index f7dd2a7..504f53b 100644
14303--- a/arch/x86/kernel/dumpstack_32.c
14304+++ b/arch/x86/kernel/dumpstack_32.c
14305@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14306 #endif
14307
14308 for (;;) {
14309- struct thread_info *context;
14310+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14311+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14312
14313- context = (struct thread_info *)
14314- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14315- bp = print_context_stack(context, stack, bp, ops,
14316- data, NULL, &graph);
14317-
14318- stack = (unsigned long *)context->previous_esp;
14319- if (!stack)
14320+ if (stack_start == task_stack_page(task))
14321 break;
14322+ stack = *(unsigned long **)stack_start;
14323 if (ops->stack(data, "IRQ") < 0)
14324 break;
14325 touch_nmi_watchdog();
14326@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14327 * When in-kernel, we also print out the stack and code at the
14328 * time of the fault..
14329 */
14330- if (!user_mode_vm(regs)) {
14331+ if (!user_mode(regs)) {
14332 unsigned int code_prologue = code_bytes * 43 / 64;
14333 unsigned int code_len = code_bytes;
14334 unsigned char c;
14335 u8 *ip;
14336+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14337
14338 printk(KERN_EMERG "Stack:\n");
14339 show_stack_log_lvl(NULL, regs, &regs->sp,
14340@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14341
14342 printk(KERN_EMERG "Code: ");
14343
14344- ip = (u8 *)regs->ip - code_prologue;
14345+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14346 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14347 /* try starting at IP */
14348- ip = (u8 *)regs->ip;
14349+ ip = (u8 *)regs->ip + cs_base;
14350 code_len = code_len - code_prologue + 1;
14351 }
14352 for (i = 0; i < code_len; i++, ip++) {
14353@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14354 printk(" Bad EIP value.");
14355 break;
14356 }
14357- if (ip == (u8 *)regs->ip)
14358+ if (ip == (u8 *)regs->ip + cs_base)
14359 printk("<%02x> ", c);
14360 else
14361 printk("%02x ", c);
14362@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14363 printk("\n");
14364 }
14365
14366+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14367+void pax_check_alloca(unsigned long size)
14368+{
14369+ unsigned long sp = (unsigned long)&sp, stack_left;
14370+
14371+ /* all kernel stacks are of the same size */
14372+ stack_left = sp & (THREAD_SIZE - 1);
14373+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14374+}
14375+EXPORT_SYMBOL(pax_check_alloca);
14376+#endif
14377+
14378 int is_valid_bugaddr(unsigned long ip)
14379 {
14380 unsigned short ud2;
14381
14382+ ip = ktla_ktva(ip);
14383 if (ip < PAGE_OFFSET)
14384 return 0;
14385 if (probe_kernel_address((unsigned short *)ip, ud2))
14386diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14387index a071e6b..36cd585 100644
14388--- a/arch/x86/kernel/dumpstack_64.c
14389+++ b/arch/x86/kernel/dumpstack_64.c
14390@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14391 unsigned long *irq_stack_end =
14392 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14393 unsigned used = 0;
14394- struct thread_info *tinfo;
14395 int graph = 0;
14396+ void *stack_start;
14397
14398 if (!task)
14399 task = current;
14400@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14401 * current stack address. If the stacks consist of nested
14402 * exceptions
14403 */
14404- tinfo = task_thread_info(task);
14405 for (;;) {
14406 char *id;
14407 unsigned long *estack_end;
14408+
14409 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14410 &used, &id);
14411
14412@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14413 if (ops->stack(data, id) < 0)
14414 break;
14415
14416- bp = print_context_stack(tinfo, stack, bp, ops,
14417+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14418 data, estack_end, &graph);
14419 ops->stack(data, "<EOE>");
14420 /*
14421@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14422 if (stack >= irq_stack && stack < irq_stack_end) {
14423 if (ops->stack(data, "IRQ") < 0)
14424 break;
14425- bp = print_context_stack(tinfo, stack, bp,
14426+ bp = print_context_stack(task, irq_stack, stack, bp,
14427 ops, data, irq_stack_end, &graph);
14428 /*
14429 * We link to the next stack (which would be
14430@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14431 /*
14432 * This handles the process stack:
14433 */
14434- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14435+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14436+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14437 put_cpu();
14438 }
14439 EXPORT_SYMBOL(dump_trace);
14440@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14441 return ud2 == 0x0b0f;
14442 }
14443
14444+
14445+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14446+void pax_check_alloca(unsigned long size)
14447+{
14448+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14449+ unsigned cpu, used;
14450+ char *id;
14451+
14452+ /* check the process stack first */
14453+ stack_start = (unsigned long)task_stack_page(current);
14454+ stack_end = stack_start + THREAD_SIZE;
14455+ if (likely(stack_start <= sp && sp < stack_end)) {
14456+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14457+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14458+ return;
14459+ }
14460+
14461+ cpu = get_cpu();
14462+
14463+ /* check the irq stacks */
14464+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14465+ stack_start = stack_end - IRQ_STACK_SIZE;
14466+ if (stack_start <= sp && sp < stack_end) {
14467+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14468+ put_cpu();
14469+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14470+ return;
14471+ }
14472+
14473+ /* check the exception stacks */
14474+ used = 0;
14475+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14476+ stack_start = stack_end - EXCEPTION_STKSZ;
14477+ if (stack_end && stack_start <= sp && sp < stack_end) {
14478+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14479+ put_cpu();
14480+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14481+ return;
14482+ }
14483+
14484+ put_cpu();
14485+
14486+ /* unknown stack */
14487+ BUG();
14488+}
14489+EXPORT_SYMBOL(pax_check_alloca);
14490+#endif
14491diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14492index a89739a..95e0c48 100644
14493--- a/arch/x86/kernel/e820.c
14494+++ b/arch/x86/kernel/e820.c
14495@@ -733,7 +733,7 @@ struct early_res {
14496 };
14497 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14498 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14499- {}
14500+ { 0, 0, {0}, 0 }
14501 };
14502
14503 static int __init find_overlapped_early(u64 start, u64 end)
14504diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14505index b9c830c..1e41a96 100644
14506--- a/arch/x86/kernel/early_printk.c
14507+++ b/arch/x86/kernel/early_printk.c
14508@@ -7,6 +7,7 @@
14509 #include <linux/pci_regs.h>
14510 #include <linux/pci_ids.h>
14511 #include <linux/errno.h>
14512+#include <linux/sched.h>
14513 #include <asm/io.h>
14514 #include <asm/processor.h>
14515 #include <asm/fcntl.h>
14516@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14517 int n;
14518 va_list ap;
14519
14520+ pax_track_stack();
14521+
14522 va_start(ap, fmt);
14523 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14524 early_console->write(early_console, buf, n);
14525diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14526index 5cab48e..b025f9b 100644
14527--- a/arch/x86/kernel/efi_32.c
14528+++ b/arch/x86/kernel/efi_32.c
14529@@ -38,70 +38,56 @@
14530 */
14531
14532 static unsigned long efi_rt_eflags;
14533-static pgd_t efi_bak_pg_dir_pointer[2];
14534+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14535
14536-void efi_call_phys_prelog(void)
14537+void __init efi_call_phys_prelog(void)
14538 {
14539- unsigned long cr4;
14540- unsigned long temp;
14541 struct desc_ptr gdt_descr;
14542
14543+#ifdef CONFIG_PAX_KERNEXEC
14544+ struct desc_struct d;
14545+#endif
14546+
14547 local_irq_save(efi_rt_eflags);
14548
14549- /*
14550- * If I don't have PAE, I should just duplicate two entries in page
14551- * directory. If I have PAE, I just need to duplicate one entry in
14552- * page directory.
14553- */
14554- cr4 = read_cr4_safe();
14555-
14556- if (cr4 & X86_CR4_PAE) {
14557- efi_bak_pg_dir_pointer[0].pgd =
14558- swapper_pg_dir[pgd_index(0)].pgd;
14559- swapper_pg_dir[0].pgd =
14560- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14561- } else {
14562- efi_bak_pg_dir_pointer[0].pgd =
14563- swapper_pg_dir[pgd_index(0)].pgd;
14564- efi_bak_pg_dir_pointer[1].pgd =
14565- swapper_pg_dir[pgd_index(0x400000)].pgd;
14566- swapper_pg_dir[pgd_index(0)].pgd =
14567- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14568- temp = PAGE_OFFSET + 0x400000;
14569- swapper_pg_dir[pgd_index(0x400000)].pgd =
14570- swapper_pg_dir[pgd_index(temp)].pgd;
14571- }
14572+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14573+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14574+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14575
14576 /*
14577 * After the lock is released, the original page table is restored.
14578 */
14579 __flush_tlb_all();
14580
14581+#ifdef CONFIG_PAX_KERNEXEC
14582+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14583+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14584+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14585+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14586+#endif
14587+
14588 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14589 gdt_descr.size = GDT_SIZE - 1;
14590 load_gdt(&gdt_descr);
14591 }
14592
14593-void efi_call_phys_epilog(void)
14594+void __init efi_call_phys_epilog(void)
14595 {
14596- unsigned long cr4;
14597 struct desc_ptr gdt_descr;
14598
14599+#ifdef CONFIG_PAX_KERNEXEC
14600+ struct desc_struct d;
14601+
14602+ memset(&d, 0, sizeof d);
14603+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14604+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14605+#endif
14606+
14607 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14608 gdt_descr.size = GDT_SIZE - 1;
14609 load_gdt(&gdt_descr);
14610
14611- cr4 = read_cr4_safe();
14612-
14613- if (cr4 & X86_CR4_PAE) {
14614- swapper_pg_dir[pgd_index(0)].pgd =
14615- efi_bak_pg_dir_pointer[0].pgd;
14616- } else {
14617- swapper_pg_dir[pgd_index(0)].pgd =
14618- efi_bak_pg_dir_pointer[0].pgd;
14619- swapper_pg_dir[pgd_index(0x400000)].pgd =
14620- efi_bak_pg_dir_pointer[1].pgd;
14621- }
14622+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14623
14624 /*
14625 * After the lock is released, the original page table is restored.
14626diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14627index fbe66e6..c5c0dd2 100644
14628--- a/arch/x86/kernel/efi_stub_32.S
14629+++ b/arch/x86/kernel/efi_stub_32.S
14630@@ -6,7 +6,9 @@
14631 */
14632
14633 #include <linux/linkage.h>
14634+#include <linux/init.h>
14635 #include <asm/page_types.h>
14636+#include <asm/segment.h>
14637
14638 /*
14639 * efi_call_phys(void *, ...) is a function with variable parameters.
14640@@ -20,7 +22,7 @@
14641 * service functions will comply with gcc calling convention, too.
14642 */
14643
14644-.text
14645+__INIT
14646 ENTRY(efi_call_phys)
14647 /*
14648 * 0. The function can only be called in Linux kernel. So CS has been
14649@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14650 * The mapping of lower virtual memory has been created in prelog and
14651 * epilog.
14652 */
14653- movl $1f, %edx
14654- subl $__PAGE_OFFSET, %edx
14655- jmp *%edx
14656+ movl $(__KERNEXEC_EFI_DS), %edx
14657+ mov %edx, %ds
14658+ mov %edx, %es
14659+ mov %edx, %ss
14660+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14661 1:
14662
14663 /*
14664@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14665 * parameter 2, ..., param n. To make things easy, we save the return
14666 * address of efi_call_phys in a global variable.
14667 */
14668- popl %edx
14669- movl %edx, saved_return_addr
14670- /* get the function pointer into ECX*/
14671- popl %ecx
14672- movl %ecx, efi_rt_function_ptr
14673- movl $2f, %edx
14674- subl $__PAGE_OFFSET, %edx
14675- pushl %edx
14676+ popl (saved_return_addr)
14677+ popl (efi_rt_function_ptr)
14678
14679 /*
14680 * 3. Clear PG bit in %CR0.
14681@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14682 /*
14683 * 5. Call the physical function.
14684 */
14685- jmp *%ecx
14686+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
14687
14688-2:
14689 /*
14690 * 6. After EFI runtime service returns, control will return to
14691 * following instruction. We'd better readjust stack pointer first.
14692@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14693 movl %cr0, %edx
14694 orl $0x80000000, %edx
14695 movl %edx, %cr0
14696- jmp 1f
14697-1:
14698+
14699 /*
14700 * 8. Now restore the virtual mode from flat mode by
14701 * adding EIP with PAGE_OFFSET.
14702 */
14703- movl $1f, %edx
14704- jmp *%edx
14705+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14706 1:
14707+ movl $(__KERNEL_DS), %edx
14708+ mov %edx, %ds
14709+ mov %edx, %es
14710+ mov %edx, %ss
14711
14712 /*
14713 * 9. Balance the stack. And because EAX contain the return value,
14714 * we'd better not clobber it.
14715 */
14716- leal efi_rt_function_ptr, %edx
14717- movl (%edx), %ecx
14718- pushl %ecx
14719+ pushl (efi_rt_function_ptr)
14720
14721 /*
14722- * 10. Push the saved return address onto the stack and return.
14723+ * 10. Return to the saved return address.
14724 */
14725- leal saved_return_addr, %edx
14726- movl (%edx), %ecx
14727- pushl %ecx
14728- ret
14729+ jmpl *(saved_return_addr)
14730 ENDPROC(efi_call_phys)
14731 .previous
14732
14733-.data
14734+__INITDATA
14735 saved_return_addr:
14736 .long 0
14737 efi_rt_function_ptr:
14738diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14739index 4c07cca..2c8427d 100644
14740--- a/arch/x86/kernel/efi_stub_64.S
14741+++ b/arch/x86/kernel/efi_stub_64.S
14742@@ -7,6 +7,7 @@
14743 */
14744
14745 #include <linux/linkage.h>
14746+#include <asm/alternative-asm.h>
14747
14748 #define SAVE_XMM \
14749 mov %rsp, %rax; \
14750@@ -40,6 +41,7 @@ ENTRY(efi_call0)
14751 call *%rdi
14752 addq $32, %rsp
14753 RESTORE_XMM
14754+ pax_force_retaddr 0, 1
14755 ret
14756 ENDPROC(efi_call0)
14757
14758@@ -50,6 +52,7 @@ ENTRY(efi_call1)
14759 call *%rdi
14760 addq $32, %rsp
14761 RESTORE_XMM
14762+ pax_force_retaddr 0, 1
14763 ret
14764 ENDPROC(efi_call1)
14765
14766@@ -60,6 +63,7 @@ ENTRY(efi_call2)
14767 call *%rdi
14768 addq $32, %rsp
14769 RESTORE_XMM
14770+ pax_force_retaddr 0, 1
14771 ret
14772 ENDPROC(efi_call2)
14773
14774@@ -71,6 +75,7 @@ ENTRY(efi_call3)
14775 call *%rdi
14776 addq $32, %rsp
14777 RESTORE_XMM
14778+ pax_force_retaddr 0, 1
14779 ret
14780 ENDPROC(efi_call3)
14781
14782@@ -83,6 +88,7 @@ ENTRY(efi_call4)
14783 call *%rdi
14784 addq $32, %rsp
14785 RESTORE_XMM
14786+ pax_force_retaddr 0, 1
14787 ret
14788 ENDPROC(efi_call4)
14789
14790@@ -96,6 +102,7 @@ ENTRY(efi_call5)
14791 call *%rdi
14792 addq $48, %rsp
14793 RESTORE_XMM
14794+ pax_force_retaddr 0, 1
14795 ret
14796 ENDPROC(efi_call5)
14797
14798@@ -112,5 +119,6 @@ ENTRY(efi_call6)
14799 call *%rdi
14800 addq $48, %rsp
14801 RESTORE_XMM
14802+ pax_force_retaddr 0, 1
14803 ret
14804 ENDPROC(efi_call6)
14805diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14806index c097e7d..c689cf4 100644
14807--- a/arch/x86/kernel/entry_32.S
14808+++ b/arch/x86/kernel/entry_32.S
14809@@ -185,13 +185,146 @@
14810 /*CFI_REL_OFFSET gs, PT_GS*/
14811 .endm
14812 .macro SET_KERNEL_GS reg
14813+
14814+#ifdef CONFIG_CC_STACKPROTECTOR
14815 movl $(__KERNEL_STACK_CANARY), \reg
14816+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14817+ movl $(__USER_DS), \reg
14818+#else
14819+ xorl \reg, \reg
14820+#endif
14821+
14822 movl \reg, %gs
14823 .endm
14824
14825 #endif /* CONFIG_X86_32_LAZY_GS */
14826
14827-.macro SAVE_ALL
14828+.macro pax_enter_kernel
14829+#ifdef CONFIG_PAX_KERNEXEC
14830+ call pax_enter_kernel
14831+#endif
14832+.endm
14833+
14834+.macro pax_exit_kernel
14835+#ifdef CONFIG_PAX_KERNEXEC
14836+ call pax_exit_kernel
14837+#endif
14838+.endm
14839+
14840+#ifdef CONFIG_PAX_KERNEXEC
14841+ENTRY(pax_enter_kernel)
14842+#ifdef CONFIG_PARAVIRT
14843+ pushl %eax
14844+ pushl %ecx
14845+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14846+ mov %eax, %esi
14847+#else
14848+ mov %cr0, %esi
14849+#endif
14850+ bts $16, %esi
14851+ jnc 1f
14852+ mov %cs, %esi
14853+ cmp $__KERNEL_CS, %esi
14854+ jz 3f
14855+ ljmp $__KERNEL_CS, $3f
14856+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14857+2:
14858+#ifdef CONFIG_PARAVIRT
14859+ mov %esi, %eax
14860+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14861+#else
14862+ mov %esi, %cr0
14863+#endif
14864+3:
14865+#ifdef CONFIG_PARAVIRT
14866+ popl %ecx
14867+ popl %eax
14868+#endif
14869+ ret
14870+ENDPROC(pax_enter_kernel)
14871+
14872+ENTRY(pax_exit_kernel)
14873+#ifdef CONFIG_PARAVIRT
14874+ pushl %eax
14875+ pushl %ecx
14876+#endif
14877+ mov %cs, %esi
14878+ cmp $__KERNEXEC_KERNEL_CS, %esi
14879+ jnz 2f
14880+#ifdef CONFIG_PARAVIRT
14881+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14882+ mov %eax, %esi
14883+#else
14884+ mov %cr0, %esi
14885+#endif
14886+ btr $16, %esi
14887+ ljmp $__KERNEL_CS, $1f
14888+1:
14889+#ifdef CONFIG_PARAVIRT
14890+ mov %esi, %eax
14891+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14892+#else
14893+ mov %esi, %cr0
14894+#endif
14895+2:
14896+#ifdef CONFIG_PARAVIRT
14897+ popl %ecx
14898+ popl %eax
14899+#endif
14900+ ret
14901+ENDPROC(pax_exit_kernel)
14902+#endif
14903+
14904+.macro pax_erase_kstack
14905+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14906+ call pax_erase_kstack
14907+#endif
14908+.endm
14909+
14910+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14911+/*
14912+ * ebp: thread_info
14913+ * ecx, edx: can be clobbered
14914+ */
14915+ENTRY(pax_erase_kstack)
14916+ pushl %edi
14917+ pushl %eax
14918+
14919+ mov TI_lowest_stack(%ebp), %edi
14920+ mov $-0xBEEF, %eax
14921+ std
14922+
14923+1: mov %edi, %ecx
14924+ and $THREAD_SIZE_asm - 1, %ecx
14925+ shr $2, %ecx
14926+ repne scasl
14927+ jecxz 2f
14928+
14929+ cmp $2*16, %ecx
14930+ jc 2f
14931+
14932+ mov $2*16, %ecx
14933+ repe scasl
14934+ jecxz 2f
14935+ jne 1b
14936+
14937+2: cld
14938+ mov %esp, %ecx
14939+ sub %edi, %ecx
14940+ shr $2, %ecx
14941+ rep stosl
14942+
14943+ mov TI_task_thread_sp0(%ebp), %edi
14944+ sub $128, %edi
14945+ mov %edi, TI_lowest_stack(%ebp)
14946+
14947+ popl %eax
14948+ popl %edi
14949+ ret
14950+ENDPROC(pax_erase_kstack)
14951+#endif
14952+
14953+.macro __SAVE_ALL _DS
14954 cld
14955 PUSH_GS
14956 pushl %fs
14957@@ -224,7 +357,7 @@
14958 pushl %ebx
14959 CFI_ADJUST_CFA_OFFSET 4
14960 CFI_REL_OFFSET ebx, 0
14961- movl $(__USER_DS), %edx
14962+ movl $\_DS, %edx
14963 movl %edx, %ds
14964 movl %edx, %es
14965 movl $(__KERNEL_PERCPU), %edx
14966@@ -232,6 +365,15 @@
14967 SET_KERNEL_GS %edx
14968 .endm
14969
14970+.macro SAVE_ALL
14971+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14972+ __SAVE_ALL __KERNEL_DS
14973+ pax_enter_kernel
14974+#else
14975+ __SAVE_ALL __USER_DS
14976+#endif
14977+.endm
14978+
14979 .macro RESTORE_INT_REGS
14980 popl %ebx
14981 CFI_ADJUST_CFA_OFFSET -4
14982@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
14983 CFI_ADJUST_CFA_OFFSET -4
14984 jmp syscall_exit
14985 CFI_ENDPROC
14986-END(ret_from_fork)
14987+ENDPROC(ret_from_fork)
14988
14989 /*
14990 * Return to user mode is not as complex as all this looks,
14991@@ -352,7 +494,15 @@ check_userspace:
14992 movb PT_CS(%esp), %al
14993 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
14994 cmpl $USER_RPL, %eax
14995+
14996+#ifdef CONFIG_PAX_KERNEXEC
14997+ jae resume_userspace
14998+
14999+ PAX_EXIT_KERNEL
15000+ jmp resume_kernel
15001+#else
15002 jb resume_kernel # not returning to v8086 or userspace
15003+#endif
15004
15005 ENTRY(resume_userspace)
15006 LOCKDEP_SYS_EXIT
15007@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15008 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15009 # int/exception return?
15010 jne work_pending
15011- jmp restore_all
15012-END(ret_from_exception)
15013+ jmp restore_all_pax
15014+ENDPROC(ret_from_exception)
15015
15016 #ifdef CONFIG_PREEMPT
15017 ENTRY(resume_kernel)
15018@@ -380,7 +530,7 @@ need_resched:
15019 jz restore_all
15020 call preempt_schedule_irq
15021 jmp need_resched
15022-END(resume_kernel)
15023+ENDPROC(resume_kernel)
15024 #endif
15025 CFI_ENDPROC
15026
15027@@ -414,25 +564,36 @@ sysenter_past_esp:
15028 /*CFI_REL_OFFSET cs, 0*/
15029 /*
15030 * Push current_thread_info()->sysenter_return to the stack.
15031- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15032- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15033 */
15034- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15035+ pushl $0
15036 CFI_ADJUST_CFA_OFFSET 4
15037 CFI_REL_OFFSET eip, 0
15038
15039 pushl %eax
15040 CFI_ADJUST_CFA_OFFSET 4
15041 SAVE_ALL
15042+ GET_THREAD_INFO(%ebp)
15043+ movl TI_sysenter_return(%ebp),%ebp
15044+ movl %ebp,PT_EIP(%esp)
15045 ENABLE_INTERRUPTS(CLBR_NONE)
15046
15047 /*
15048 * Load the potential sixth argument from user stack.
15049 * Careful about security.
15050 */
15051+ movl PT_OLDESP(%esp),%ebp
15052+
15053+#ifdef CONFIG_PAX_MEMORY_UDEREF
15054+ mov PT_OLDSS(%esp),%ds
15055+1: movl %ds:(%ebp),%ebp
15056+ push %ss
15057+ pop %ds
15058+#else
15059 cmpl $__PAGE_OFFSET-3,%ebp
15060 jae syscall_fault
15061 1: movl (%ebp),%ebp
15062+#endif
15063+
15064 movl %ebp,PT_EBP(%esp)
15065 .section __ex_table,"a"
15066 .align 4
15067@@ -455,12 +616,24 @@ sysenter_do_call:
15068 testl $_TIF_ALLWORK_MASK, %ecx
15069 jne sysexit_audit
15070 sysenter_exit:
15071+
15072+#ifdef CONFIG_PAX_RANDKSTACK
15073+ pushl_cfi %eax
15074+ movl %esp, %eax
15075+ call pax_randomize_kstack
15076+ popl_cfi %eax
15077+#endif
15078+
15079+ pax_erase_kstack
15080+
15081 /* if something modifies registers it must also disable sysexit */
15082 movl PT_EIP(%esp), %edx
15083 movl PT_OLDESP(%esp), %ecx
15084 xorl %ebp,%ebp
15085 TRACE_IRQS_ON
15086 1: mov PT_FS(%esp), %fs
15087+2: mov PT_DS(%esp), %ds
15088+3: mov PT_ES(%esp), %es
15089 PTGS_TO_GS
15090 ENABLE_INTERRUPTS_SYSEXIT
15091
15092@@ -477,6 +650,9 @@ sysenter_audit:
15093 movl %eax,%edx /* 2nd arg: syscall number */
15094 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15095 call audit_syscall_entry
15096+
15097+ pax_erase_kstack
15098+
15099 pushl %ebx
15100 CFI_ADJUST_CFA_OFFSET 4
15101 movl PT_EAX(%esp),%eax /* reload syscall number */
15102@@ -504,11 +680,17 @@ sysexit_audit:
15103
15104 CFI_ENDPROC
15105 .pushsection .fixup,"ax"
15106-2: movl $0,PT_FS(%esp)
15107+4: movl $0,PT_FS(%esp)
15108+ jmp 1b
15109+5: movl $0,PT_DS(%esp)
15110+ jmp 1b
15111+6: movl $0,PT_ES(%esp)
15112 jmp 1b
15113 .section __ex_table,"a"
15114 .align 4
15115- .long 1b,2b
15116+ .long 1b,4b
15117+ .long 2b,5b
15118+ .long 3b,6b
15119 .popsection
15120 PTGS_TO_GS_EX
15121 ENDPROC(ia32_sysenter_target)
15122@@ -538,6 +720,15 @@ syscall_exit:
15123 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15124 jne syscall_exit_work
15125
15126+restore_all_pax:
15127+
15128+#ifdef CONFIG_PAX_RANDKSTACK
15129+ movl %esp, %eax
15130+ call pax_randomize_kstack
15131+#endif
15132+
15133+ pax_erase_kstack
15134+
15135 restore_all:
15136 TRACE_IRQS_IRET
15137 restore_all_notrace:
15138@@ -602,10 +793,29 @@ ldt_ss:
15139 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15140 mov %dx, %ax /* eax: new kernel esp */
15141 sub %eax, %edx /* offset (low word is 0) */
15142- PER_CPU(gdt_page, %ebx)
15143+#ifdef CONFIG_SMP
15144+ movl PER_CPU_VAR(cpu_number), %ebx
15145+ shll $PAGE_SHIFT_asm, %ebx
15146+ addl $cpu_gdt_table, %ebx
15147+#else
15148+ movl $cpu_gdt_table, %ebx
15149+#endif
15150 shr $16, %edx
15151+
15152+#ifdef CONFIG_PAX_KERNEXEC
15153+ mov %cr0, %esi
15154+ btr $16, %esi
15155+ mov %esi, %cr0
15156+#endif
15157+
15158 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15159 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15160+
15161+#ifdef CONFIG_PAX_KERNEXEC
15162+ bts $16, %esi
15163+ mov %esi, %cr0
15164+#endif
15165+
15166 pushl $__ESPFIX_SS
15167 CFI_ADJUST_CFA_OFFSET 4
15168 push %eax /* new kernel esp */
15169@@ -636,36 +846,30 @@ work_resched:
15170 movl TI_flags(%ebp), %ecx
15171 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15172 # than syscall tracing?
15173- jz restore_all
15174+ jz restore_all_pax
15175 testb $_TIF_NEED_RESCHED, %cl
15176 jnz work_resched
15177
15178 work_notifysig: # deal with pending signals and
15179 # notify-resume requests
15180+ movl %esp, %eax
15181 #ifdef CONFIG_VM86
15182 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15183- movl %esp, %eax
15184- jne work_notifysig_v86 # returning to kernel-space or
15185+ jz 1f # returning to kernel-space or
15186 # vm86-space
15187- xorl %edx, %edx
15188- call do_notify_resume
15189- jmp resume_userspace_sig
15190
15191- ALIGN
15192-work_notifysig_v86:
15193 pushl %ecx # save ti_flags for do_notify_resume
15194 CFI_ADJUST_CFA_OFFSET 4
15195 call save_v86_state # %eax contains pt_regs pointer
15196 popl %ecx
15197 CFI_ADJUST_CFA_OFFSET -4
15198 movl %eax, %esp
15199-#else
15200- movl %esp, %eax
15201+1:
15202 #endif
15203 xorl %edx, %edx
15204 call do_notify_resume
15205 jmp resume_userspace_sig
15206-END(work_pending)
15207+ENDPROC(work_pending)
15208
15209 # perform syscall exit tracing
15210 ALIGN
15211@@ -673,11 +877,14 @@ syscall_trace_entry:
15212 movl $-ENOSYS,PT_EAX(%esp)
15213 movl %esp, %eax
15214 call syscall_trace_enter
15215+
15216+ pax_erase_kstack
15217+
15218 /* What it returned is what we'll actually use. */
15219 cmpl $(nr_syscalls), %eax
15220 jnae syscall_call
15221 jmp syscall_exit
15222-END(syscall_trace_entry)
15223+ENDPROC(syscall_trace_entry)
15224
15225 # perform syscall exit tracing
15226 ALIGN
15227@@ -690,20 +897,24 @@ syscall_exit_work:
15228 movl %esp, %eax
15229 call syscall_trace_leave
15230 jmp resume_userspace
15231-END(syscall_exit_work)
15232+ENDPROC(syscall_exit_work)
15233 CFI_ENDPROC
15234
15235 RING0_INT_FRAME # can't unwind into user space anyway
15236 syscall_fault:
15237+#ifdef CONFIG_PAX_MEMORY_UDEREF
15238+ push %ss
15239+ pop %ds
15240+#endif
15241 GET_THREAD_INFO(%ebp)
15242 movl $-EFAULT,PT_EAX(%esp)
15243 jmp resume_userspace
15244-END(syscall_fault)
15245+ENDPROC(syscall_fault)
15246
15247 syscall_badsys:
15248 movl $-ENOSYS,PT_EAX(%esp)
15249 jmp resume_userspace
15250-END(syscall_badsys)
15251+ENDPROC(syscall_badsys)
15252 CFI_ENDPROC
15253
15254 /*
15255@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15256 PTREGSCALL(vm86)
15257 PTREGSCALL(vm86old)
15258
15259+ ALIGN;
15260+ENTRY(kernel_execve)
15261+ push %ebp
15262+ sub $PT_OLDSS+4,%esp
15263+ push %edi
15264+ push %ecx
15265+ push %eax
15266+ lea 3*4(%esp),%edi
15267+ mov $PT_OLDSS/4+1,%ecx
15268+ xorl %eax,%eax
15269+ rep stosl
15270+ pop %eax
15271+ pop %ecx
15272+ pop %edi
15273+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15274+ mov %eax,PT_EBX(%esp)
15275+ mov %edx,PT_ECX(%esp)
15276+ mov %ecx,PT_EDX(%esp)
15277+ mov %esp,%eax
15278+ call sys_execve
15279+ GET_THREAD_INFO(%ebp)
15280+ test %eax,%eax
15281+ jz syscall_exit
15282+ add $PT_OLDSS+4,%esp
15283+ pop %ebp
15284+ ret
15285+
15286 .macro FIXUP_ESPFIX_STACK
15287 /*
15288 * Switch back for ESPFIX stack to the normal zerobased stack
15289@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15290 * normal stack and adjusts ESP with the matching offset.
15291 */
15292 /* fixup the stack */
15293- PER_CPU(gdt_page, %ebx)
15294+#ifdef CONFIG_SMP
15295+ movl PER_CPU_VAR(cpu_number), %ebx
15296+ shll $PAGE_SHIFT_asm, %ebx
15297+ addl $cpu_gdt_table, %ebx
15298+#else
15299+ movl $cpu_gdt_table, %ebx
15300+#endif
15301 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15302 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15303 shl $16, %eax
15304@@ -793,7 +1037,7 @@ vector=vector+1
15305 .endr
15306 2: jmp common_interrupt
15307 .endr
15308-END(irq_entries_start)
15309+ENDPROC(irq_entries_start)
15310
15311 .previous
15312 END(interrupt)
15313@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15314 CFI_ADJUST_CFA_OFFSET 4
15315 jmp error_code
15316 CFI_ENDPROC
15317-END(coprocessor_error)
15318+ENDPROC(coprocessor_error)
15319
15320 ENTRY(simd_coprocessor_error)
15321 RING0_INT_FRAME
15322@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15323 CFI_ADJUST_CFA_OFFSET 4
15324 jmp error_code
15325 CFI_ENDPROC
15326-END(simd_coprocessor_error)
15327+ENDPROC(simd_coprocessor_error)
15328
15329 ENTRY(device_not_available)
15330 RING0_INT_FRAME
15331@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15332 CFI_ADJUST_CFA_OFFSET 4
15333 jmp error_code
15334 CFI_ENDPROC
15335-END(device_not_available)
15336+ENDPROC(device_not_available)
15337
15338 #ifdef CONFIG_PARAVIRT
15339 ENTRY(native_iret)
15340@@ -869,12 +1113,12 @@ ENTRY(native_iret)
15341 .align 4
15342 .long native_iret, iret_exc
15343 .previous
15344-END(native_iret)
15345+ENDPROC(native_iret)
15346
15347 ENTRY(native_irq_enable_sysexit)
15348 sti
15349 sysexit
15350-END(native_irq_enable_sysexit)
15351+ENDPROC(native_irq_enable_sysexit)
15352 #endif
15353
15354 ENTRY(overflow)
15355@@ -885,7 +1129,7 @@ ENTRY(overflow)
15356 CFI_ADJUST_CFA_OFFSET 4
15357 jmp error_code
15358 CFI_ENDPROC
15359-END(overflow)
15360+ENDPROC(overflow)
15361
15362 ENTRY(bounds)
15363 RING0_INT_FRAME
15364@@ -895,7 +1139,7 @@ ENTRY(bounds)
15365 CFI_ADJUST_CFA_OFFSET 4
15366 jmp error_code
15367 CFI_ENDPROC
15368-END(bounds)
15369+ENDPROC(bounds)
15370
15371 ENTRY(invalid_op)
15372 RING0_INT_FRAME
15373@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15374 CFI_ADJUST_CFA_OFFSET 4
15375 jmp error_code
15376 CFI_ENDPROC
15377-END(invalid_op)
15378+ENDPROC(invalid_op)
15379
15380 ENTRY(coprocessor_segment_overrun)
15381 RING0_INT_FRAME
15382@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15383 CFI_ADJUST_CFA_OFFSET 4
15384 jmp error_code
15385 CFI_ENDPROC
15386-END(coprocessor_segment_overrun)
15387+ENDPROC(coprocessor_segment_overrun)
15388
15389 ENTRY(invalid_TSS)
15390 RING0_EC_FRAME
15391@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15392 CFI_ADJUST_CFA_OFFSET 4
15393 jmp error_code
15394 CFI_ENDPROC
15395-END(invalid_TSS)
15396+ENDPROC(invalid_TSS)
15397
15398 ENTRY(segment_not_present)
15399 RING0_EC_FRAME
15400@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15401 CFI_ADJUST_CFA_OFFSET 4
15402 jmp error_code
15403 CFI_ENDPROC
15404-END(segment_not_present)
15405+ENDPROC(segment_not_present)
15406
15407 ENTRY(stack_segment)
15408 RING0_EC_FRAME
15409@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15410 CFI_ADJUST_CFA_OFFSET 4
15411 jmp error_code
15412 CFI_ENDPROC
15413-END(stack_segment)
15414+ENDPROC(stack_segment)
15415
15416 ENTRY(alignment_check)
15417 RING0_EC_FRAME
15418@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15419 CFI_ADJUST_CFA_OFFSET 4
15420 jmp error_code
15421 CFI_ENDPROC
15422-END(alignment_check)
15423+ENDPROC(alignment_check)
15424
15425 ENTRY(divide_error)
15426 RING0_INT_FRAME
15427@@ -957,7 +1201,7 @@ ENTRY(divide_error)
15428 CFI_ADJUST_CFA_OFFSET 4
15429 jmp error_code
15430 CFI_ENDPROC
15431-END(divide_error)
15432+ENDPROC(divide_error)
15433
15434 #ifdef CONFIG_X86_MCE
15435 ENTRY(machine_check)
15436@@ -968,7 +1212,7 @@ ENTRY(machine_check)
15437 CFI_ADJUST_CFA_OFFSET 4
15438 jmp error_code
15439 CFI_ENDPROC
15440-END(machine_check)
15441+ENDPROC(machine_check)
15442 #endif
15443
15444 ENTRY(spurious_interrupt_bug)
15445@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15446 CFI_ADJUST_CFA_OFFSET 4
15447 jmp error_code
15448 CFI_ENDPROC
15449-END(spurious_interrupt_bug)
15450+ENDPROC(spurious_interrupt_bug)
15451
15452 ENTRY(kernel_thread_helper)
15453 pushl $0 # fake return address for unwinder
15454@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15455
15456 ENTRY(mcount)
15457 ret
15458-END(mcount)
15459+ENDPROC(mcount)
15460
15461 ENTRY(ftrace_caller)
15462 cmpl $0, function_trace_stop
15463@@ -1124,7 +1368,7 @@ ftrace_graph_call:
15464 .globl ftrace_stub
15465 ftrace_stub:
15466 ret
15467-END(ftrace_caller)
15468+ENDPROC(ftrace_caller)
15469
15470 #else /* ! CONFIG_DYNAMIC_FTRACE */
15471
15472@@ -1160,7 +1404,7 @@ trace:
15473 popl %ecx
15474 popl %eax
15475 jmp ftrace_stub
15476-END(mcount)
15477+ENDPROC(mcount)
15478 #endif /* CONFIG_DYNAMIC_FTRACE */
15479 #endif /* CONFIG_FUNCTION_TRACER */
15480
15481@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15482 popl %ecx
15483 popl %eax
15484 ret
15485-END(ftrace_graph_caller)
15486+ENDPROC(ftrace_graph_caller)
15487
15488 .globl return_to_handler
15489 return_to_handler:
15490@@ -1198,7 +1442,6 @@ return_to_handler:
15491 ret
15492 #endif
15493
15494-.section .rodata,"a"
15495 #include "syscall_table_32.S"
15496
15497 syscall_table_size=(.-sys_call_table)
15498@@ -1255,15 +1498,18 @@ error_code:
15499 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15500 REG_TO_PTGS %ecx
15501 SET_KERNEL_GS %ecx
15502- movl $(__USER_DS), %ecx
15503+ movl $(__KERNEL_DS), %ecx
15504 movl %ecx, %ds
15505 movl %ecx, %es
15506+
15507+ pax_enter_kernel
15508+
15509 TRACE_IRQS_OFF
15510 movl %esp,%eax # pt_regs pointer
15511 call *%edi
15512 jmp ret_from_exception
15513 CFI_ENDPROC
15514-END(page_fault)
15515+ENDPROC(page_fault)
15516
15517 /*
15518 * Debug traps and NMI can happen at the one SYSENTER instruction
15519@@ -1309,7 +1555,7 @@ debug_stack_correct:
15520 call do_debug
15521 jmp ret_from_exception
15522 CFI_ENDPROC
15523-END(debug)
15524+ENDPROC(debug)
15525
15526 /*
15527 * NMI is doubly nasty. It can happen _while_ we're handling
15528@@ -1351,6 +1597,9 @@ nmi_stack_correct:
15529 xorl %edx,%edx # zero error code
15530 movl %esp,%eax # pt_regs pointer
15531 call do_nmi
15532+
15533+ pax_exit_kernel
15534+
15535 jmp restore_all_notrace
15536 CFI_ENDPROC
15537
15538@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15539 FIXUP_ESPFIX_STACK # %eax == %esp
15540 xorl %edx,%edx # zero error code
15541 call do_nmi
15542+
15543+ pax_exit_kernel
15544+
15545 RESTORE_REGS
15546 lss 12+4(%esp), %esp # back to espfix stack
15547 CFI_ADJUST_CFA_OFFSET -24
15548 jmp irq_return
15549 CFI_ENDPROC
15550-END(nmi)
15551+ENDPROC(nmi)
15552
15553 ENTRY(int3)
15554 RING0_INT_FRAME
15555@@ -1409,7 +1661,7 @@ ENTRY(int3)
15556 call do_int3
15557 jmp ret_from_exception
15558 CFI_ENDPROC
15559-END(int3)
15560+ENDPROC(int3)
15561
15562 ENTRY(general_protection)
15563 RING0_EC_FRAME
15564@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15565 CFI_ADJUST_CFA_OFFSET 4
15566 jmp error_code
15567 CFI_ENDPROC
15568-END(general_protection)
15569+ENDPROC(general_protection)
15570
15571 /*
15572 * End of kprobes section
15573diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15574index 34a56a9..a4abbbe 100644
15575--- a/arch/x86/kernel/entry_64.S
15576+++ b/arch/x86/kernel/entry_64.S
15577@@ -53,6 +53,8 @@
15578 #include <asm/paravirt.h>
15579 #include <asm/ftrace.h>
15580 #include <asm/percpu.h>
15581+#include <asm/pgtable.h>
15582+#include <asm/alternative-asm.h>
15583
15584 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15585 #include <linux/elf-em.h>
15586@@ -64,8 +66,9 @@
15587 #ifdef CONFIG_FUNCTION_TRACER
15588 #ifdef CONFIG_DYNAMIC_FTRACE
15589 ENTRY(mcount)
15590+ pax_force_retaddr
15591 retq
15592-END(mcount)
15593+ENDPROC(mcount)
15594
15595 ENTRY(ftrace_caller)
15596 cmpl $0, function_trace_stop
15597@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15598 #endif
15599
15600 GLOBAL(ftrace_stub)
15601+ pax_force_retaddr
15602 retq
15603-END(ftrace_caller)
15604+ENDPROC(ftrace_caller)
15605
15606 #else /* ! CONFIG_DYNAMIC_FTRACE */
15607 ENTRY(mcount)
15608@@ -108,6 +112,7 @@ ENTRY(mcount)
15609 #endif
15610
15611 GLOBAL(ftrace_stub)
15612+ pax_force_retaddr
15613 retq
15614
15615 trace:
15616@@ -117,12 +122,13 @@ trace:
15617 movq 8(%rbp), %rsi
15618 subq $MCOUNT_INSN_SIZE, %rdi
15619
15620+ pax_force_fptr ftrace_trace_function
15621 call *ftrace_trace_function
15622
15623 MCOUNT_RESTORE_FRAME
15624
15625 jmp ftrace_stub
15626-END(mcount)
15627+ENDPROC(mcount)
15628 #endif /* CONFIG_DYNAMIC_FTRACE */
15629 #endif /* CONFIG_FUNCTION_TRACER */
15630
15631@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15632
15633 MCOUNT_RESTORE_FRAME
15634
15635+ pax_force_retaddr
15636 retq
15637-END(ftrace_graph_caller)
15638+ENDPROC(ftrace_graph_caller)
15639
15640 GLOBAL(return_to_handler)
15641 subq $24, %rsp
15642@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15643 movq 8(%rsp), %rdx
15644 movq (%rsp), %rax
15645 addq $16, %rsp
15646+ pax_force_retaddr
15647 retq
15648 #endif
15649
15650@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15651 ENDPROC(native_usergs_sysret64)
15652 #endif /* CONFIG_PARAVIRT */
15653
15654+ .macro ljmpq sel, off
15655+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15656+ .byte 0x48; ljmp *1234f(%rip)
15657+ .pushsection .rodata
15658+ .align 16
15659+ 1234: .quad \off; .word \sel
15660+ .popsection
15661+#else
15662+ pushq $\sel
15663+ pushq $\off
15664+ lretq
15665+#endif
15666+ .endm
15667+
15668+ .macro pax_enter_kernel
15669+ pax_set_fptr_mask
15670+#ifdef CONFIG_PAX_KERNEXEC
15671+ call pax_enter_kernel
15672+#endif
15673+ .endm
15674+
15675+ .macro pax_exit_kernel
15676+#ifdef CONFIG_PAX_KERNEXEC
15677+ call pax_exit_kernel
15678+#endif
15679+ .endm
15680+
15681+#ifdef CONFIG_PAX_KERNEXEC
15682+ENTRY(pax_enter_kernel)
15683+ pushq %rdi
15684+
15685+#ifdef CONFIG_PARAVIRT
15686+ PV_SAVE_REGS(CLBR_RDI)
15687+#endif
15688+
15689+ GET_CR0_INTO_RDI
15690+ bts $16,%rdi
15691+ jnc 3f
15692+ mov %cs,%edi
15693+ cmp $__KERNEL_CS,%edi
15694+ jnz 2f
15695+1:
15696+
15697+#ifdef CONFIG_PARAVIRT
15698+ PV_RESTORE_REGS(CLBR_RDI)
15699+#endif
15700+
15701+ popq %rdi
15702+ pax_force_retaddr
15703+ retq
15704+
15705+2: ljmpq __KERNEL_CS,1f
15706+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15707+4: SET_RDI_INTO_CR0
15708+ jmp 1b
15709+ENDPROC(pax_enter_kernel)
15710+
15711+ENTRY(pax_exit_kernel)
15712+ pushq %rdi
15713+
15714+#ifdef CONFIG_PARAVIRT
15715+ PV_SAVE_REGS(CLBR_RDI)
15716+#endif
15717+
15718+ mov %cs,%rdi
15719+ cmp $__KERNEXEC_KERNEL_CS,%edi
15720+ jz 2f
15721+1:
15722+
15723+#ifdef CONFIG_PARAVIRT
15724+ PV_RESTORE_REGS(CLBR_RDI);
15725+#endif
15726+
15727+ popq %rdi
15728+ pax_force_retaddr
15729+ retq
15730+
15731+2: GET_CR0_INTO_RDI
15732+ btr $16,%rdi
15733+ ljmpq __KERNEL_CS,3f
15734+3: SET_RDI_INTO_CR0
15735+ jmp 1b
15736+#ifdef CONFIG_PARAVIRT
15737+ PV_RESTORE_REGS(CLBR_RDI);
15738+#endif
15739+
15740+ popq %rdi
15741+ pax_force_retaddr
15742+ retq
15743+ENDPROC(pax_exit_kernel)
15744+#endif
15745+
15746+ .macro pax_enter_kernel_user
15747+ pax_set_fptr_mask
15748+#ifdef CONFIG_PAX_MEMORY_UDEREF
15749+ call pax_enter_kernel_user
15750+#endif
15751+ .endm
15752+
15753+ .macro pax_exit_kernel_user
15754+#ifdef CONFIG_PAX_MEMORY_UDEREF
15755+ call pax_exit_kernel_user
15756+#endif
15757+#ifdef CONFIG_PAX_RANDKSTACK
15758+ push %rax
15759+ call pax_randomize_kstack
15760+ pop %rax
15761+#endif
15762+ .endm
15763+
15764+#ifdef CONFIG_PAX_MEMORY_UDEREF
15765+ENTRY(pax_enter_kernel_user)
15766+ pushq %rdi
15767+ pushq %rbx
15768+
15769+#ifdef CONFIG_PARAVIRT
15770+ PV_SAVE_REGS(CLBR_RDI)
15771+#endif
15772+
15773+ GET_CR3_INTO_RDI
15774+ mov %rdi,%rbx
15775+ add $__START_KERNEL_map,%rbx
15776+ sub phys_base(%rip),%rbx
15777+
15778+#ifdef CONFIG_PARAVIRT
15779+ pushq %rdi
15780+ cmpl $0, pv_info+PARAVIRT_enabled
15781+ jz 1f
15782+ i = 0
15783+ .rept USER_PGD_PTRS
15784+ mov i*8(%rbx),%rsi
15785+ mov $0,%sil
15786+ lea i*8(%rbx),%rdi
15787+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15788+ i = i + 1
15789+ .endr
15790+ jmp 2f
15791+1:
15792+#endif
15793+
15794+ i = 0
15795+ .rept USER_PGD_PTRS
15796+ movb $0,i*8(%rbx)
15797+ i = i + 1
15798+ .endr
15799+
15800+#ifdef CONFIG_PARAVIRT
15801+2: popq %rdi
15802+#endif
15803+ SET_RDI_INTO_CR3
15804+
15805+#ifdef CONFIG_PAX_KERNEXEC
15806+ GET_CR0_INTO_RDI
15807+ bts $16,%rdi
15808+ SET_RDI_INTO_CR0
15809+#endif
15810+
15811+#ifdef CONFIG_PARAVIRT
15812+ PV_RESTORE_REGS(CLBR_RDI)
15813+#endif
15814+
15815+ popq %rbx
15816+ popq %rdi
15817+ pax_force_retaddr
15818+ retq
15819+ENDPROC(pax_enter_kernel_user)
15820+
15821+ENTRY(pax_exit_kernel_user)
15822+ push %rdi
15823+
15824+#ifdef CONFIG_PARAVIRT
15825+ pushq %rbx
15826+ PV_SAVE_REGS(CLBR_RDI)
15827+#endif
15828+
15829+#ifdef CONFIG_PAX_KERNEXEC
15830+ GET_CR0_INTO_RDI
15831+ btr $16,%rdi
15832+ SET_RDI_INTO_CR0
15833+#endif
15834+
15835+ GET_CR3_INTO_RDI
15836+ add $__START_KERNEL_map,%rdi
15837+ sub phys_base(%rip),%rdi
15838+
15839+#ifdef CONFIG_PARAVIRT
15840+ cmpl $0, pv_info+PARAVIRT_enabled
15841+ jz 1f
15842+ mov %rdi,%rbx
15843+ i = 0
15844+ .rept USER_PGD_PTRS
15845+ mov i*8(%rbx),%rsi
15846+ mov $0x67,%sil
15847+ lea i*8(%rbx),%rdi
15848+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15849+ i = i + 1
15850+ .endr
15851+ jmp 2f
15852+1:
15853+#endif
15854+
15855+ i = 0
15856+ .rept USER_PGD_PTRS
15857+ movb $0x67,i*8(%rdi)
15858+ i = i + 1
15859+ .endr
15860+
15861+#ifdef CONFIG_PARAVIRT
15862+2: PV_RESTORE_REGS(CLBR_RDI)
15863+ popq %rbx
15864+#endif
15865+
15866+ popq %rdi
15867+ pax_force_retaddr
15868+ retq
15869+ENDPROC(pax_exit_kernel_user)
15870+#endif
15871+
15872+.macro pax_erase_kstack
15873+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15874+ call pax_erase_kstack
15875+#endif
15876+.endm
15877+
15878+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15879+/*
15880+ * r11: thread_info
15881+ * rcx, rdx: can be clobbered
15882+ */
15883+ENTRY(pax_erase_kstack)
15884+ pushq %rdi
15885+ pushq %rax
15886+ pushq %r11
15887+
15888+ GET_THREAD_INFO(%r11)
15889+ mov TI_lowest_stack(%r11), %rdi
15890+ mov $-0xBEEF, %rax
15891+ std
15892+
15893+1: mov %edi, %ecx
15894+ and $THREAD_SIZE_asm - 1, %ecx
15895+ shr $3, %ecx
15896+ repne scasq
15897+ jecxz 2f
15898+
15899+ cmp $2*8, %ecx
15900+ jc 2f
15901+
15902+ mov $2*8, %ecx
15903+ repe scasq
15904+ jecxz 2f
15905+ jne 1b
15906+
15907+2: cld
15908+ mov %esp, %ecx
15909+ sub %edi, %ecx
15910+
15911+ cmp $THREAD_SIZE_asm, %rcx
15912+ jb 3f
15913+ ud2
15914+3:
15915+
15916+ shr $3, %ecx
15917+ rep stosq
15918+
15919+ mov TI_task_thread_sp0(%r11), %rdi
15920+ sub $256, %rdi
15921+ mov %rdi, TI_lowest_stack(%r11)
15922+
15923+ popq %r11
15924+ popq %rax
15925+ popq %rdi
15926+ pax_force_retaddr
15927+ ret
15928+ENDPROC(pax_erase_kstack)
15929+#endif
15930
15931 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15932 #ifdef CONFIG_TRACE_IRQFLAGS
15933@@ -317,7 +601,7 @@ ENTRY(save_args)
15934 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
15935 movq_cfi rbp, 8 /* push %rbp */
15936 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
15937- testl $3, CS(%rdi)
15938+ testb $3, CS(%rdi)
15939 je 1f
15940 SWAPGS
15941 /*
15942@@ -337,9 +621,10 @@ ENTRY(save_args)
15943 * We entered an interrupt context - irqs are off:
15944 */
15945 2: TRACE_IRQS_OFF
15946+ pax_force_retaddr
15947 ret
15948 CFI_ENDPROC
15949-END(save_args)
15950+ENDPROC(save_args)
15951
15952 ENTRY(save_rest)
15953 PARTIAL_FRAME 1 REST_SKIP+8
15954@@ -352,9 +637,10 @@ ENTRY(save_rest)
15955 movq_cfi r15, R15+16
15956 movq %r11, 8(%rsp) /* return address */
15957 FIXUP_TOP_OF_STACK %r11, 16
15958+ pax_force_retaddr
15959 ret
15960 CFI_ENDPROC
15961-END(save_rest)
15962+ENDPROC(save_rest)
15963
15964 /* save complete stack frame */
15965 .pushsection .kprobes.text, "ax"
15966@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
15967 js 1f /* negative -> in kernel */
15968 SWAPGS
15969 xorl %ebx,%ebx
15970-1: ret
15971+1: pax_force_retaddr_bts
15972+ ret
15973 CFI_ENDPROC
15974-END(save_paranoid)
15975+ENDPROC(save_paranoid)
15976 .popsection
15977
15978 /*
15979@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
15980
15981 RESTORE_REST
15982
15983- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15984+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15985 je int_ret_from_sys_call
15986
15987 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15988@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
15989 jmp ret_from_sys_call # go to the SYSRET fastpath
15990
15991 CFI_ENDPROC
15992-END(ret_from_fork)
15993+ENDPROC(ret_from_fork)
15994
15995 /*
15996 * System call entry. Upto 6 arguments in registers are supported.
15997@@ -455,7 +742,7 @@ END(ret_from_fork)
15998 ENTRY(system_call)
15999 CFI_STARTPROC simple
16000 CFI_SIGNAL_FRAME
16001- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16002+ CFI_DEF_CFA rsp,0
16003 CFI_REGISTER rip,rcx
16004 /*CFI_REGISTER rflags,r11*/
16005 SWAPGS_UNSAFE_STACK
16006@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16007
16008 movq %rsp,PER_CPU_VAR(old_rsp)
16009 movq PER_CPU_VAR(kernel_stack),%rsp
16010+ SAVE_ARGS 8*6,1
16011+ pax_enter_kernel_user
16012 /*
16013 * No need to follow this irqs off/on section - it's straight
16014 * and short:
16015 */
16016 ENABLE_INTERRUPTS(CLBR_NONE)
16017- SAVE_ARGS 8,1
16018 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16019 movq %rcx,RIP-ARGOFFSET(%rsp)
16020 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16021@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16022 system_call_fastpath:
16023 cmpq $__NR_syscall_max,%rax
16024 ja badsys
16025- movq %r10,%rcx
16026+ movq R10-ARGOFFSET(%rsp),%rcx
16027 call *sys_call_table(,%rax,8) # XXX: rip relative
16028 movq %rax,RAX-ARGOFFSET(%rsp)
16029 /*
16030@@ -502,6 +790,8 @@ sysret_check:
16031 andl %edi,%edx
16032 jnz sysret_careful
16033 CFI_REMEMBER_STATE
16034+ pax_exit_kernel_user
16035+ pax_erase_kstack
16036 /*
16037 * sysretq will re-enable interrupts:
16038 */
16039@@ -555,14 +845,18 @@ badsys:
16040 * jump back to the normal fast path.
16041 */
16042 auditsys:
16043- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16044+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16045 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16046 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16047 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16048 movq %rax,%rsi /* 2nd arg: syscall number */
16049 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16050 call audit_syscall_entry
16051+
16052+ pax_erase_kstack
16053+
16054 LOAD_ARGS 0 /* reload call-clobbered registers */
16055+ pax_set_fptr_mask
16056 jmp system_call_fastpath
16057
16058 /*
16059@@ -592,16 +886,20 @@ tracesys:
16060 FIXUP_TOP_OF_STACK %rdi
16061 movq %rsp,%rdi
16062 call syscall_trace_enter
16063+
16064+ pax_erase_kstack
16065+
16066 /*
16067 * Reload arg registers from stack in case ptrace changed them.
16068 * We don't reload %rax because syscall_trace_enter() returned
16069 * the value it wants us to use in the table lookup.
16070 */
16071 LOAD_ARGS ARGOFFSET, 1
16072+ pax_set_fptr_mask
16073 RESTORE_REST
16074 cmpq $__NR_syscall_max,%rax
16075 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16076- movq %r10,%rcx /* fixup for C */
16077+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16078 call *sys_call_table(,%rax,8)
16079 movq %rax,RAX-ARGOFFSET(%rsp)
16080 /* Use IRET because user could have changed frame */
16081@@ -613,7 +911,7 @@ tracesys:
16082 GLOBAL(int_ret_from_sys_call)
16083 DISABLE_INTERRUPTS(CLBR_NONE)
16084 TRACE_IRQS_OFF
16085- testl $3,CS-ARGOFFSET(%rsp)
16086+ testb $3,CS-ARGOFFSET(%rsp)
16087 je retint_restore_args
16088 movl $_TIF_ALLWORK_MASK,%edi
16089 /* edi: mask to check */
16090@@ -674,7 +972,7 @@ int_restore_rest:
16091 TRACE_IRQS_OFF
16092 jmp int_with_check
16093 CFI_ENDPROC
16094-END(system_call)
16095+ENDPROC(system_call)
16096
16097 /*
16098 * Certain special system calls that need to save a complete full stack frame.
16099@@ -690,7 +988,7 @@ ENTRY(\label)
16100 call \func
16101 jmp ptregscall_common
16102 CFI_ENDPROC
16103-END(\label)
16104+ENDPROC(\label)
16105 .endm
16106
16107 PTREGSCALL stub_clone, sys_clone, %r8
16108@@ -708,9 +1006,10 @@ ENTRY(ptregscall_common)
16109 movq_cfi_restore R12+8, r12
16110 movq_cfi_restore RBP+8, rbp
16111 movq_cfi_restore RBX+8, rbx
16112+ pax_force_retaddr
16113 ret $REST_SKIP /* pop extended registers */
16114 CFI_ENDPROC
16115-END(ptregscall_common)
16116+ENDPROC(ptregscall_common)
16117
16118 ENTRY(stub_execve)
16119 CFI_STARTPROC
16120@@ -726,7 +1025,7 @@ ENTRY(stub_execve)
16121 RESTORE_REST
16122 jmp int_ret_from_sys_call
16123 CFI_ENDPROC
16124-END(stub_execve)
16125+ENDPROC(stub_execve)
16126
16127 /*
16128 * sigreturn is special because it needs to restore all registers on return.
16129@@ -744,7 +1043,7 @@ ENTRY(stub_rt_sigreturn)
16130 RESTORE_REST
16131 jmp int_ret_from_sys_call
16132 CFI_ENDPROC
16133-END(stub_rt_sigreturn)
16134+ENDPROC(stub_rt_sigreturn)
16135
16136 /*
16137 * Build the entry stubs and pointer table with some assembler magic.
16138@@ -780,7 +1079,7 @@ vector=vector+1
16139 2: jmp common_interrupt
16140 .endr
16141 CFI_ENDPROC
16142-END(irq_entries_start)
16143+ENDPROC(irq_entries_start)
16144
16145 .previous
16146 END(interrupt)
16147@@ -800,6 +1099,16 @@ END(interrupt)
16148 CFI_ADJUST_CFA_OFFSET 10*8
16149 call save_args
16150 PARTIAL_FRAME 0
16151+#ifdef CONFIG_PAX_MEMORY_UDEREF
16152+ testb $3, CS(%rdi)
16153+ jnz 1f
16154+ pax_enter_kernel
16155+ jmp 2f
16156+1: pax_enter_kernel_user
16157+2:
16158+#else
16159+ pax_enter_kernel
16160+#endif
16161 call \func
16162 .endm
16163
16164@@ -822,7 +1131,7 @@ ret_from_intr:
16165 CFI_ADJUST_CFA_OFFSET -8
16166 exit_intr:
16167 GET_THREAD_INFO(%rcx)
16168- testl $3,CS-ARGOFFSET(%rsp)
16169+ testb $3,CS-ARGOFFSET(%rsp)
16170 je retint_kernel
16171
16172 /* Interrupt came from user space */
16173@@ -844,12 +1153,16 @@ retint_swapgs: /* return to user-space */
16174 * The iretq could re-enable interrupts:
16175 */
16176 DISABLE_INTERRUPTS(CLBR_ANY)
16177+ pax_exit_kernel_user
16178+ pax_erase_kstack
16179 TRACE_IRQS_IRETQ
16180 SWAPGS
16181 jmp restore_args
16182
16183 retint_restore_args: /* return to kernel space */
16184 DISABLE_INTERRUPTS(CLBR_ANY)
16185+ pax_exit_kernel
16186+ pax_force_retaddr RIP-ARGOFFSET
16187 /*
16188 * The iretq could re-enable interrupts:
16189 */
16190@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16191 #endif
16192
16193 CFI_ENDPROC
16194-END(common_interrupt)
16195+ENDPROC(common_interrupt)
16196
16197 /*
16198 * APIC interrupts.
16199@@ -953,7 +1266,7 @@ ENTRY(\sym)
16200 interrupt \do_sym
16201 jmp ret_from_intr
16202 CFI_ENDPROC
16203-END(\sym)
16204+ENDPROC(\sym)
16205 .endm
16206
16207 #ifdef CONFIG_SMP
16208@@ -1032,12 +1345,22 @@ ENTRY(\sym)
16209 CFI_ADJUST_CFA_OFFSET 15*8
16210 call error_entry
16211 DEFAULT_FRAME 0
16212+#ifdef CONFIG_PAX_MEMORY_UDEREF
16213+ testb $3, CS(%rsp)
16214+ jnz 1f
16215+ pax_enter_kernel
16216+ jmp 2f
16217+1: pax_enter_kernel_user
16218+2:
16219+#else
16220+ pax_enter_kernel
16221+#endif
16222 movq %rsp,%rdi /* pt_regs pointer */
16223 xorl %esi,%esi /* no error code */
16224 call \do_sym
16225 jmp error_exit /* %ebx: no swapgs flag */
16226 CFI_ENDPROC
16227-END(\sym)
16228+ENDPROC(\sym)
16229 .endm
16230
16231 .macro paranoidzeroentry sym do_sym
16232@@ -1049,12 +1372,22 @@ ENTRY(\sym)
16233 subq $15*8, %rsp
16234 call save_paranoid
16235 TRACE_IRQS_OFF
16236+#ifdef CONFIG_PAX_MEMORY_UDEREF
16237+ testb $3, CS(%rsp)
16238+ jnz 1f
16239+ pax_enter_kernel
16240+ jmp 2f
16241+1: pax_enter_kernel_user
16242+2:
16243+#else
16244+ pax_enter_kernel
16245+#endif
16246 movq %rsp,%rdi /* pt_regs pointer */
16247 xorl %esi,%esi /* no error code */
16248 call \do_sym
16249 jmp paranoid_exit /* %ebx: no swapgs flag */
16250 CFI_ENDPROC
16251-END(\sym)
16252+ENDPROC(\sym)
16253 .endm
16254
16255 .macro paranoidzeroentry_ist sym do_sym ist
16256@@ -1066,15 +1399,30 @@ ENTRY(\sym)
16257 subq $15*8, %rsp
16258 call save_paranoid
16259 TRACE_IRQS_OFF
16260+#ifdef CONFIG_PAX_MEMORY_UDEREF
16261+ testb $3, CS(%rsp)
16262+ jnz 1f
16263+ pax_enter_kernel
16264+ jmp 2f
16265+1: pax_enter_kernel_user
16266+2:
16267+#else
16268+ pax_enter_kernel
16269+#endif
16270 movq %rsp,%rdi /* pt_regs pointer */
16271 xorl %esi,%esi /* no error code */
16272- PER_CPU(init_tss, %rbp)
16273+#ifdef CONFIG_SMP
16274+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16275+ lea init_tss(%rbp), %rbp
16276+#else
16277+ lea init_tss(%rip), %rbp
16278+#endif
16279 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16280 call \do_sym
16281 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16282 jmp paranoid_exit /* %ebx: no swapgs flag */
16283 CFI_ENDPROC
16284-END(\sym)
16285+ENDPROC(\sym)
16286 .endm
16287
16288 .macro errorentry sym do_sym
16289@@ -1085,13 +1433,23 @@ ENTRY(\sym)
16290 CFI_ADJUST_CFA_OFFSET 15*8
16291 call error_entry
16292 DEFAULT_FRAME 0
16293+#ifdef CONFIG_PAX_MEMORY_UDEREF
16294+ testb $3, CS(%rsp)
16295+ jnz 1f
16296+ pax_enter_kernel
16297+ jmp 2f
16298+1: pax_enter_kernel_user
16299+2:
16300+#else
16301+ pax_enter_kernel
16302+#endif
16303 movq %rsp,%rdi /* pt_regs pointer */
16304 movq ORIG_RAX(%rsp),%rsi /* get error code */
16305 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16306 call \do_sym
16307 jmp error_exit /* %ebx: no swapgs flag */
16308 CFI_ENDPROC
16309-END(\sym)
16310+ENDPROC(\sym)
16311 .endm
16312
16313 /* error code is on the stack already */
16314@@ -1104,13 +1462,23 @@ ENTRY(\sym)
16315 call save_paranoid
16316 DEFAULT_FRAME 0
16317 TRACE_IRQS_OFF
16318+#ifdef CONFIG_PAX_MEMORY_UDEREF
16319+ testb $3, CS(%rsp)
16320+ jnz 1f
16321+ pax_enter_kernel
16322+ jmp 2f
16323+1: pax_enter_kernel_user
16324+2:
16325+#else
16326+ pax_enter_kernel
16327+#endif
16328 movq %rsp,%rdi /* pt_regs pointer */
16329 movq ORIG_RAX(%rsp),%rsi /* get error code */
16330 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16331 call \do_sym
16332 jmp paranoid_exit /* %ebx: no swapgs flag */
16333 CFI_ENDPROC
16334-END(\sym)
16335+ENDPROC(\sym)
16336 .endm
16337
16338 zeroentry divide_error do_divide_error
16339@@ -1141,9 +1509,10 @@ gs_change:
16340 SWAPGS
16341 popf
16342 CFI_ADJUST_CFA_OFFSET -8
16343+ pax_force_retaddr
16344 ret
16345 CFI_ENDPROC
16346-END(native_load_gs_index)
16347+ENDPROC(native_load_gs_index)
16348
16349 .section __ex_table,"a"
16350 .align 8
16351@@ -1195,9 +1564,10 @@ ENTRY(kernel_thread)
16352 */
16353 RESTORE_ALL
16354 UNFAKE_STACK_FRAME
16355+ pax_force_retaddr
16356 ret
16357 CFI_ENDPROC
16358-END(kernel_thread)
16359+ENDPROC(kernel_thread)
16360
16361 ENTRY(child_rip)
16362 pushq $0 # fake return address
16363@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16364 */
16365 movq %rdi, %rax
16366 movq %rsi, %rdi
16367+ pax_force_fptr %rax
16368 call *%rax
16369 # exit
16370 mov %eax, %edi
16371 call do_exit
16372 ud2 # padding for call trace
16373 CFI_ENDPROC
16374-END(child_rip)
16375+ENDPROC(child_rip)
16376
16377 /*
16378 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16379@@ -1243,9 +1614,10 @@ ENTRY(kernel_execve)
16380 je int_ret_from_sys_call
16381 RESTORE_ARGS
16382 UNFAKE_STACK_FRAME
16383+ pax_force_retaddr
16384 ret
16385 CFI_ENDPROC
16386-END(kernel_execve)
16387+ENDPROC(kernel_execve)
16388
16389 /* Call softirq on interrupt stack. Interrupts are off. */
16390 ENTRY(call_softirq)
16391@@ -1263,9 +1635,10 @@ ENTRY(call_softirq)
16392 CFI_DEF_CFA_REGISTER rsp
16393 CFI_ADJUST_CFA_OFFSET -8
16394 decl PER_CPU_VAR(irq_count)
16395+ pax_force_retaddr
16396 ret
16397 CFI_ENDPROC
16398-END(call_softirq)
16399+ENDPROC(call_softirq)
16400
16401 #ifdef CONFIG_XEN
16402 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16403@@ -1303,7 +1676,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16404 decl PER_CPU_VAR(irq_count)
16405 jmp error_exit
16406 CFI_ENDPROC
16407-END(xen_do_hypervisor_callback)
16408+ENDPROC(xen_do_hypervisor_callback)
16409
16410 /*
16411 * Hypervisor uses this for application faults while it executes.
16412@@ -1362,7 +1735,7 @@ ENTRY(xen_failsafe_callback)
16413 SAVE_ALL
16414 jmp error_exit
16415 CFI_ENDPROC
16416-END(xen_failsafe_callback)
16417+ENDPROC(xen_failsafe_callback)
16418
16419 #endif /* CONFIG_XEN */
16420
16421@@ -1405,16 +1778,31 @@ ENTRY(paranoid_exit)
16422 TRACE_IRQS_OFF
16423 testl %ebx,%ebx /* swapgs needed? */
16424 jnz paranoid_restore
16425- testl $3,CS(%rsp)
16426+ testb $3,CS(%rsp)
16427 jnz paranoid_userspace
16428+#ifdef CONFIG_PAX_MEMORY_UDEREF
16429+ pax_exit_kernel
16430+ TRACE_IRQS_IRETQ 0
16431+ SWAPGS_UNSAFE_STACK
16432+ RESTORE_ALL 8
16433+ pax_force_retaddr_bts
16434+ jmp irq_return
16435+#endif
16436 paranoid_swapgs:
16437+#ifdef CONFIG_PAX_MEMORY_UDEREF
16438+ pax_exit_kernel_user
16439+#else
16440+ pax_exit_kernel
16441+#endif
16442 TRACE_IRQS_IRETQ 0
16443 SWAPGS_UNSAFE_STACK
16444 RESTORE_ALL 8
16445 jmp irq_return
16446 paranoid_restore:
16447+ pax_exit_kernel
16448 TRACE_IRQS_IRETQ 0
16449 RESTORE_ALL 8
16450+ pax_force_retaddr_bts
16451 jmp irq_return
16452 paranoid_userspace:
16453 GET_THREAD_INFO(%rcx)
16454@@ -1443,7 +1831,7 @@ paranoid_schedule:
16455 TRACE_IRQS_OFF
16456 jmp paranoid_userspace
16457 CFI_ENDPROC
16458-END(paranoid_exit)
16459+ENDPROC(paranoid_exit)
16460
16461 /*
16462 * Exception entry point. This expects an error code/orig_rax on the stack.
16463@@ -1470,12 +1858,13 @@ ENTRY(error_entry)
16464 movq_cfi r14, R14+8
16465 movq_cfi r15, R15+8
16466 xorl %ebx,%ebx
16467- testl $3,CS+8(%rsp)
16468+ testb $3,CS+8(%rsp)
16469 je error_kernelspace
16470 error_swapgs:
16471 SWAPGS
16472 error_sti:
16473 TRACE_IRQS_OFF
16474+ pax_force_retaddr_bts
16475 ret
16476 CFI_ENDPROC
16477
16478@@ -1497,7 +1886,7 @@ error_kernelspace:
16479 cmpq $gs_change,RIP+8(%rsp)
16480 je error_swapgs
16481 jmp error_sti
16482-END(error_entry)
16483+ENDPROC(error_entry)
16484
16485
16486 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16487@@ -1517,7 +1906,7 @@ ENTRY(error_exit)
16488 jnz retint_careful
16489 jmp retint_swapgs
16490 CFI_ENDPROC
16491-END(error_exit)
16492+ENDPROC(error_exit)
16493
16494
16495 /* runs on exception stack */
16496@@ -1529,6 +1918,16 @@ ENTRY(nmi)
16497 CFI_ADJUST_CFA_OFFSET 15*8
16498 call save_paranoid
16499 DEFAULT_FRAME 0
16500+#ifdef CONFIG_PAX_MEMORY_UDEREF
16501+ testb $3, CS(%rsp)
16502+ jnz 1f
16503+ pax_enter_kernel
16504+ jmp 2f
16505+1: pax_enter_kernel_user
16506+2:
16507+#else
16508+ pax_enter_kernel
16509+#endif
16510 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16511 movq %rsp,%rdi
16512 movq $-1,%rsi
16513@@ -1539,12 +1938,28 @@ ENTRY(nmi)
16514 DISABLE_INTERRUPTS(CLBR_NONE)
16515 testl %ebx,%ebx /* swapgs needed? */
16516 jnz nmi_restore
16517- testl $3,CS(%rsp)
16518+ testb $3,CS(%rsp)
16519 jnz nmi_userspace
16520+#ifdef CONFIG_PAX_MEMORY_UDEREF
16521+ pax_exit_kernel
16522+ SWAPGS_UNSAFE_STACK
16523+ RESTORE_ALL 8
16524+ pax_force_retaddr_bts
16525+ jmp irq_return
16526+#endif
16527 nmi_swapgs:
16528+#ifdef CONFIG_PAX_MEMORY_UDEREF
16529+ pax_exit_kernel_user
16530+#else
16531+ pax_exit_kernel
16532+#endif
16533 SWAPGS_UNSAFE_STACK
16534+ RESTORE_ALL 8
16535+ jmp irq_return
16536 nmi_restore:
16537+ pax_exit_kernel
16538 RESTORE_ALL 8
16539+ pax_force_retaddr_bts
16540 jmp irq_return
16541 nmi_userspace:
16542 GET_THREAD_INFO(%rcx)
16543@@ -1573,14 +1988,14 @@ nmi_schedule:
16544 jmp paranoid_exit
16545 CFI_ENDPROC
16546 #endif
16547-END(nmi)
16548+ENDPROC(nmi)
16549
16550 ENTRY(ignore_sysret)
16551 CFI_STARTPROC
16552 mov $-ENOSYS,%eax
16553 sysret
16554 CFI_ENDPROC
16555-END(ignore_sysret)
16556+ENDPROC(ignore_sysret)
16557
16558 /*
16559 * End of kprobes section
16560diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16561index 9dbb527..7b3615a 100644
16562--- a/arch/x86/kernel/ftrace.c
16563+++ b/arch/x86/kernel/ftrace.c
16564@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16565 static void *mod_code_newcode; /* holds the text to write to the IP */
16566
16567 static unsigned nmi_wait_count;
16568-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16569+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16570
16571 int ftrace_arch_read_dyn_info(char *buf, int size)
16572 {
16573@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16574
16575 r = snprintf(buf, size, "%u %u",
16576 nmi_wait_count,
16577- atomic_read(&nmi_update_count));
16578+ atomic_read_unchecked(&nmi_update_count));
16579 return r;
16580 }
16581
16582@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16583 {
16584 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16585 smp_rmb();
16586+ pax_open_kernel();
16587 ftrace_mod_code();
16588- atomic_inc(&nmi_update_count);
16589+ pax_close_kernel();
16590+ atomic_inc_unchecked(&nmi_update_count);
16591 }
16592 /* Must have previous changes seen before executions */
16593 smp_mb();
16594@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16595
16596
16597
16598-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16599+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16600
16601 static unsigned char *ftrace_nop_replace(void)
16602 {
16603@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16604 {
16605 unsigned char replaced[MCOUNT_INSN_SIZE];
16606
16607+ ip = ktla_ktva(ip);
16608+
16609 /*
16610 * Note: Due to modules and __init, code can
16611 * disappear and change, we need to protect against faulting
16612@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16613 unsigned char old[MCOUNT_INSN_SIZE], *new;
16614 int ret;
16615
16616- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16617+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16618 new = ftrace_call_replace(ip, (unsigned long)func);
16619 ret = ftrace_modify_code(ip, old, new);
16620
16621@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16622 switch (faulted) {
16623 case 0:
16624 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16625- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16626+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16627 break;
16628 case 1:
16629 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16630- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16631+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16632 break;
16633 case 2:
16634 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16635- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16636+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16637 break;
16638 }
16639
16640@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16641 {
16642 unsigned char code[MCOUNT_INSN_SIZE];
16643
16644+ ip = ktla_ktva(ip);
16645+
16646 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16647 return -EFAULT;
16648
16649diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16650index 4f8e250..df24706 100644
16651--- a/arch/x86/kernel/head32.c
16652+++ b/arch/x86/kernel/head32.c
16653@@ -16,6 +16,7 @@
16654 #include <asm/apic.h>
16655 #include <asm/io_apic.h>
16656 #include <asm/bios_ebda.h>
16657+#include <asm/boot.h>
16658
16659 static void __init i386_default_early_setup(void)
16660 {
16661@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16662 {
16663 reserve_trampoline_memory();
16664
16665- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16666+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16667
16668 #ifdef CONFIG_BLK_DEV_INITRD
16669 /* Reserve INITRD */
16670diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16671index 34c3308..6fc4e76 100644
16672--- a/arch/x86/kernel/head_32.S
16673+++ b/arch/x86/kernel/head_32.S
16674@@ -19,10 +19,17 @@
16675 #include <asm/setup.h>
16676 #include <asm/processor-flags.h>
16677 #include <asm/percpu.h>
16678+#include <asm/msr-index.h>
16679
16680 /* Physical address */
16681 #define pa(X) ((X) - __PAGE_OFFSET)
16682
16683+#ifdef CONFIG_PAX_KERNEXEC
16684+#define ta(X) (X)
16685+#else
16686+#define ta(X) ((X) - __PAGE_OFFSET)
16687+#endif
16688+
16689 /*
16690 * References to members of the new_cpu_data structure.
16691 */
16692@@ -52,11 +59,7 @@
16693 * and small than max_low_pfn, otherwise will waste some page table entries
16694 */
16695
16696-#if PTRS_PER_PMD > 1
16697-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16698-#else
16699-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16700-#endif
16701+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16702
16703 /* Enough space to fit pagetables for the low memory linear map */
16704 MAPPING_BEYOND_END = \
16705@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16706 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16707
16708 /*
16709+ * Real beginning of normal "text" segment
16710+ */
16711+ENTRY(stext)
16712+ENTRY(_stext)
16713+
16714+/*
16715 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16716 * %esi points to the real-mode code as a 32-bit pointer.
16717 * CS and DS must be 4 GB flat segments, but we don't depend on
16718@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16719 * can.
16720 */
16721 __HEAD
16722+
16723+#ifdef CONFIG_PAX_KERNEXEC
16724+ jmp startup_32
16725+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16726+.fill PAGE_SIZE-5,1,0xcc
16727+#endif
16728+
16729 ENTRY(startup_32)
16730+ movl pa(stack_start),%ecx
16731+
16732 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16733 us to not reload segments */
16734 testb $(1<<6), BP_loadflags(%esi)
16735@@ -95,7 +113,60 @@ ENTRY(startup_32)
16736 movl %eax,%es
16737 movl %eax,%fs
16738 movl %eax,%gs
16739+ movl %eax,%ss
16740 2:
16741+ leal -__PAGE_OFFSET(%ecx),%esp
16742+
16743+#ifdef CONFIG_SMP
16744+ movl $pa(cpu_gdt_table),%edi
16745+ movl $__per_cpu_load,%eax
16746+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16747+ rorl $16,%eax
16748+ movb %al,__KERNEL_PERCPU + 4(%edi)
16749+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16750+ movl $__per_cpu_end - 1,%eax
16751+ subl $__per_cpu_start,%eax
16752+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16753+#endif
16754+
16755+#ifdef CONFIG_PAX_MEMORY_UDEREF
16756+ movl $NR_CPUS,%ecx
16757+ movl $pa(cpu_gdt_table),%edi
16758+1:
16759+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16760+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16761+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16762+ addl $PAGE_SIZE_asm,%edi
16763+ loop 1b
16764+#endif
16765+
16766+#ifdef CONFIG_PAX_KERNEXEC
16767+ movl $pa(boot_gdt),%edi
16768+ movl $__LOAD_PHYSICAL_ADDR,%eax
16769+ movw %ax,__BOOT_CS + 2(%edi)
16770+ rorl $16,%eax
16771+ movb %al,__BOOT_CS + 4(%edi)
16772+ movb %ah,__BOOT_CS + 7(%edi)
16773+ rorl $16,%eax
16774+
16775+ ljmp $(__BOOT_CS),$1f
16776+1:
16777+
16778+ movl $NR_CPUS,%ecx
16779+ movl $pa(cpu_gdt_table),%edi
16780+ addl $__PAGE_OFFSET,%eax
16781+1:
16782+ movw %ax,__KERNEL_CS + 2(%edi)
16783+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16784+ rorl $16,%eax
16785+ movb %al,__KERNEL_CS + 4(%edi)
16786+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16787+ movb %ah,__KERNEL_CS + 7(%edi)
16788+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16789+ rorl $16,%eax
16790+ addl $PAGE_SIZE_asm,%edi
16791+ loop 1b
16792+#endif
16793
16794 /*
16795 * Clear BSS first so that there are no surprises...
16796@@ -140,9 +211,7 @@ ENTRY(startup_32)
16797 cmpl $num_subarch_entries, %eax
16798 jae bad_subarch
16799
16800- movl pa(subarch_entries)(,%eax,4), %eax
16801- subl $__PAGE_OFFSET, %eax
16802- jmp *%eax
16803+ jmp *pa(subarch_entries)(,%eax,4)
16804
16805 bad_subarch:
16806 WEAK(lguest_entry)
16807@@ -154,10 +223,10 @@ WEAK(xen_entry)
16808 __INITDATA
16809
16810 subarch_entries:
16811- .long default_entry /* normal x86/PC */
16812- .long lguest_entry /* lguest hypervisor */
16813- .long xen_entry /* Xen hypervisor */
16814- .long default_entry /* Moorestown MID */
16815+ .long ta(default_entry) /* normal x86/PC */
16816+ .long ta(lguest_entry) /* lguest hypervisor */
16817+ .long ta(xen_entry) /* Xen hypervisor */
16818+ .long ta(default_entry) /* Moorestown MID */
16819 num_subarch_entries = (. - subarch_entries) / 4
16820 .previous
16821 #endif /* CONFIG_PARAVIRT */
16822@@ -218,8 +287,11 @@ default_entry:
16823 movl %eax, pa(max_pfn_mapped)
16824
16825 /* Do early initialization of the fixmap area */
16826- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16827- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16828+#ifdef CONFIG_COMPAT_VDSO
16829+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16830+#else
16831+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16832+#endif
16833 #else /* Not PAE */
16834
16835 page_pde_offset = (__PAGE_OFFSET >> 20);
16836@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16837 movl %eax, pa(max_pfn_mapped)
16838
16839 /* Do early initialization of the fixmap area */
16840- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16841- movl %eax,pa(swapper_pg_dir+0xffc)
16842+#ifdef CONFIG_COMPAT_VDSO
16843+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16844+#else
16845+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16846+#endif
16847 #endif
16848 jmp 3f
16849 /*
16850@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16851 movl %eax,%es
16852 movl %eax,%fs
16853 movl %eax,%gs
16854+ movl pa(stack_start),%ecx
16855+ movl %eax,%ss
16856+ leal -__PAGE_OFFSET(%ecx),%esp
16857 #endif /* CONFIG_SMP */
16858 3:
16859
16860@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16861 orl %edx,%eax
16862 movl %eax,%cr4
16863
16864+#ifdef CONFIG_X86_PAE
16865 btl $5, %eax # check if PAE is enabled
16866 jnc 6f
16867
16868@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16869 cpuid
16870 cmpl $0x80000000, %eax
16871 jbe 6f
16872+
16873+ /* Clear bogus XD_DISABLE bits */
16874+ call verify_cpu
16875+
16876 mov $0x80000001, %eax
16877 cpuid
16878 /* Execute Disable bit supported? */
16879@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16880 jnc 6f
16881
16882 /* Setup EFER (Extended Feature Enable Register) */
16883- movl $0xc0000080, %ecx
16884+ movl $MSR_EFER, %ecx
16885 rdmsr
16886
16887 btsl $11, %eax
16888 /* Make changes effective */
16889 wrmsr
16890
16891+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16892+ movl $1,pa(nx_enabled)
16893+#endif
16894+
16895 6:
16896
16897 /*
16898@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16899 movl %eax,%cr0 /* ..and set paging (PG) bit */
16900 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16901 1:
16902- /* Set up the stack pointer */
16903- lss stack_start,%esp
16904+ /* Shift the stack pointer to a virtual address */
16905+ addl $__PAGE_OFFSET, %esp
16906
16907 /*
16908 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16909@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16910
16911 #ifdef CONFIG_SMP
16912 cmpb $0, ready
16913- jz 1f /* Initial CPU cleans BSS */
16914- jmp checkCPUtype
16915-1:
16916+ jnz checkCPUtype
16917 #endif /* CONFIG_SMP */
16918
16919 /*
16920@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
16921 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16922 movl %eax,%ss # after changing gdt.
16923
16924- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16925+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16926 movl %eax,%ds
16927 movl %eax,%es
16928
16929@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
16930 */
16931 cmpb $0,ready
16932 jne 1f
16933- movl $per_cpu__gdt_page,%eax
16934+ movl $cpu_gdt_table,%eax
16935 movl $per_cpu__stack_canary,%ecx
16936+#ifdef CONFIG_SMP
16937+ addl $__per_cpu_load,%ecx
16938+#endif
16939 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16940 shrl $16, %ecx
16941 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16942 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16943 1:
16944-#endif
16945 movl $(__KERNEL_STACK_CANARY),%eax
16946+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16947+ movl $(__USER_DS),%eax
16948+#else
16949+ xorl %eax,%eax
16950+#endif
16951 movl %eax,%gs
16952
16953 xorl %eax,%eax # Clear LDT
16954@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
16955
16956 cld # gcc2 wants the direction flag cleared at all times
16957 pushl $0 # fake return address for unwinder
16958-#ifdef CONFIG_SMP
16959- movb ready, %cl
16960 movb $1, ready
16961- cmpb $0,%cl # the first CPU calls start_kernel
16962- je 1f
16963- movl (stack_start), %esp
16964-1:
16965-#endif /* CONFIG_SMP */
16966 jmp *(initial_code)
16967
16968 /*
16969@@ -546,22 +631,22 @@ early_page_fault:
16970 jmp early_fault
16971
16972 early_fault:
16973- cld
16974 #ifdef CONFIG_PRINTK
16975+ cmpl $1,%ss:early_recursion_flag
16976+ je hlt_loop
16977+ incl %ss:early_recursion_flag
16978+ cld
16979 pusha
16980 movl $(__KERNEL_DS),%eax
16981 movl %eax,%ds
16982 movl %eax,%es
16983- cmpl $2,early_recursion_flag
16984- je hlt_loop
16985- incl early_recursion_flag
16986 movl %cr2,%eax
16987 pushl %eax
16988 pushl %edx /* trapno */
16989 pushl $fault_msg
16990 call printk
16991+; call dump_stack
16992 #endif
16993- call dump_stack
16994 hlt_loop:
16995 hlt
16996 jmp hlt_loop
16997@@ -569,8 +654,11 @@ hlt_loop:
16998 /* This is the default interrupt "handler" :-) */
16999 ALIGN
17000 ignore_int:
17001- cld
17002 #ifdef CONFIG_PRINTK
17003+ cmpl $2,%ss:early_recursion_flag
17004+ je hlt_loop
17005+ incl %ss:early_recursion_flag
17006+ cld
17007 pushl %eax
17008 pushl %ecx
17009 pushl %edx
17010@@ -579,9 +667,6 @@ ignore_int:
17011 movl $(__KERNEL_DS),%eax
17012 movl %eax,%ds
17013 movl %eax,%es
17014- cmpl $2,early_recursion_flag
17015- je hlt_loop
17016- incl early_recursion_flag
17017 pushl 16(%esp)
17018 pushl 24(%esp)
17019 pushl 32(%esp)
17020@@ -600,6 +685,8 @@ ignore_int:
17021 #endif
17022 iret
17023
17024+#include "verify_cpu.S"
17025+
17026 __REFDATA
17027 .align 4
17028 ENTRY(initial_code)
17029@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17030 /*
17031 * BSS section
17032 */
17033-__PAGE_ALIGNED_BSS
17034- .align PAGE_SIZE_asm
17035 #ifdef CONFIG_X86_PAE
17036+.section .swapper_pg_pmd,"a",@progbits
17037 swapper_pg_pmd:
17038 .fill 1024*KPMDS,4,0
17039 #else
17040+.section .swapper_pg_dir,"a",@progbits
17041 ENTRY(swapper_pg_dir)
17042 .fill 1024,4,0
17043 #endif
17044+.section .swapper_pg_fixmap,"a",@progbits
17045 swapper_pg_fixmap:
17046 .fill 1024,4,0
17047 #ifdef CONFIG_X86_TRAMPOLINE
17048+.section .trampoline_pg_dir,"a",@progbits
17049 ENTRY(trampoline_pg_dir)
17050+#ifdef CONFIG_X86_PAE
17051+ .fill 4,8,0
17052+#else
17053 .fill 1024,4,0
17054 #endif
17055+#endif
17056+
17057+.section .empty_zero_page,"a",@progbits
17058 ENTRY(empty_zero_page)
17059 .fill 4096,1,0
17060
17061 /*
17062+ * The IDT has to be page-aligned to simplify the Pentium
17063+ * F0 0F bug workaround.. We have a special link segment
17064+ * for this.
17065+ */
17066+.section .idt,"a",@progbits
17067+ENTRY(idt_table)
17068+ .fill 256,8,0
17069+
17070+/*
17071 * This starts the data section.
17072 */
17073 #ifdef CONFIG_X86_PAE
17074-__PAGE_ALIGNED_DATA
17075- /* Page-aligned for the benefit of paravirt? */
17076- .align PAGE_SIZE_asm
17077+.section .swapper_pg_dir,"a",@progbits
17078+
17079 ENTRY(swapper_pg_dir)
17080 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17081 # if KPMDS == 3
17082@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17083 # error "Kernel PMDs should be 1, 2 or 3"
17084 # endif
17085 .align PAGE_SIZE_asm /* needs to be page-sized too */
17086+
17087+#ifdef CONFIG_PAX_PER_CPU_PGD
17088+ENTRY(cpu_pgd)
17089+ .rept NR_CPUS
17090+ .fill 4,8,0
17091+ .endr
17092+#endif
17093+
17094 #endif
17095
17096 .data
17097+.balign 4
17098 ENTRY(stack_start)
17099- .long init_thread_union+THREAD_SIZE
17100- .long __BOOT_DS
17101+ .long init_thread_union+THREAD_SIZE-8
17102
17103 ready: .byte 0
17104
17105+.section .rodata,"a",@progbits
17106 early_recursion_flag:
17107 .long 0
17108
17109@@ -697,7 +809,7 @@ fault_msg:
17110 .word 0 # 32 bit align gdt_desc.address
17111 boot_gdt_descr:
17112 .word __BOOT_DS+7
17113- .long boot_gdt - __PAGE_OFFSET
17114+ .long pa(boot_gdt)
17115
17116 .word 0 # 32-bit align idt_desc.address
17117 idt_descr:
17118@@ -708,7 +820,7 @@ idt_descr:
17119 .word 0 # 32 bit align gdt_desc.address
17120 ENTRY(early_gdt_descr)
17121 .word GDT_ENTRIES*8-1
17122- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17123+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17124
17125 /*
17126 * The boot_gdt must mirror the equivalent in setup.S and is
17127@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17128 .align L1_CACHE_BYTES
17129 ENTRY(boot_gdt)
17130 .fill GDT_ENTRY_BOOT_CS,8,0
17131- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17132- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17133+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17134+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17135+
17136+ .align PAGE_SIZE_asm
17137+ENTRY(cpu_gdt_table)
17138+ .rept NR_CPUS
17139+ .quad 0x0000000000000000 /* NULL descriptor */
17140+ .quad 0x0000000000000000 /* 0x0b reserved */
17141+ .quad 0x0000000000000000 /* 0x13 reserved */
17142+ .quad 0x0000000000000000 /* 0x1b reserved */
17143+
17144+#ifdef CONFIG_PAX_KERNEXEC
17145+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17146+#else
17147+ .quad 0x0000000000000000 /* 0x20 unused */
17148+#endif
17149+
17150+ .quad 0x0000000000000000 /* 0x28 unused */
17151+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17152+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17153+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17154+ .quad 0x0000000000000000 /* 0x4b reserved */
17155+ .quad 0x0000000000000000 /* 0x53 reserved */
17156+ .quad 0x0000000000000000 /* 0x5b reserved */
17157+
17158+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17159+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17160+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17161+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17162+
17163+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17164+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17165+
17166+ /*
17167+ * Segments used for calling PnP BIOS have byte granularity.
17168+ * The code segments and data segments have fixed 64k limits,
17169+ * the transfer segment sizes are set at run time.
17170+ */
17171+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17172+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17173+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17174+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17175+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17176+
17177+ /*
17178+ * The APM segments have byte granularity and their bases
17179+ * are set at run time. All have 64k limits.
17180+ */
17181+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17182+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17183+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17184+
17185+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17186+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17187+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17188+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17189+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17190+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17191+
17192+ /* Be sure this is zeroed to avoid false validations in Xen */
17193+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17194+ .endr
17195diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17196index 780cd92..564ca35 100644
17197--- a/arch/x86/kernel/head_64.S
17198+++ b/arch/x86/kernel/head_64.S
17199@@ -19,6 +19,8 @@
17200 #include <asm/cache.h>
17201 #include <asm/processor-flags.h>
17202 #include <asm/percpu.h>
17203+#include <asm/cpufeature.h>
17204+#include <asm/alternative-asm.h>
17205
17206 #ifdef CONFIG_PARAVIRT
17207 #include <asm/asm-offsets.h>
17208@@ -38,6 +40,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17209 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17210 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17211 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17212+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17213+L3_VMALLOC_START = pud_index(VMALLOC_START)
17214+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17215+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17216
17217 .text
17218 __HEAD
17219@@ -85,35 +91,22 @@ startup_64:
17220 */
17221 addq %rbp, init_level4_pgt + 0(%rip)
17222 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17223+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17224+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17225 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17226
17227 addq %rbp, level3_ident_pgt + 0(%rip)
17228+#ifndef CONFIG_XEN
17229+ addq %rbp, level3_ident_pgt + 8(%rip)
17230+#endif
17231
17232- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17233- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17234-
17235- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17236-
17237- /* Add an Identity mapping if I am above 1G */
17238- leaq _text(%rip), %rdi
17239- andq $PMD_PAGE_MASK, %rdi
17240-
17241- movq %rdi, %rax
17242- shrq $PUD_SHIFT, %rax
17243- andq $(PTRS_PER_PUD - 1), %rax
17244- jz ident_complete
17245+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17246
17247- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17248- leaq level3_ident_pgt(%rip), %rbx
17249- movq %rdx, 0(%rbx, %rax, 8)
17250+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17251+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17252
17253- movq %rdi, %rax
17254- shrq $PMD_SHIFT, %rax
17255- andq $(PTRS_PER_PMD - 1), %rax
17256- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17257- leaq level2_spare_pgt(%rip), %rbx
17258- movq %rdx, 0(%rbx, %rax, 8)
17259-ident_complete:
17260+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17261+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17262
17263 /*
17264 * Fixup the kernel text+data virtual addresses. Note that
17265@@ -161,8 +154,8 @@ ENTRY(secondary_startup_64)
17266 * after the boot processor executes this code.
17267 */
17268
17269- /* Enable PAE mode and PGE */
17270- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17271+ /* Enable PAE mode and PSE/PGE */
17272+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17273 movq %rax, %cr4
17274
17275 /* Setup early boot stage 4 level pagetables. */
17276@@ -184,9 +177,15 @@ ENTRY(secondary_startup_64)
17277 movl $MSR_EFER, %ecx
17278 rdmsr
17279 btsl $_EFER_SCE, %eax /* Enable System Call */
17280- btl $20,%edi /* No Execute supported? */
17281+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17282 jnc 1f
17283 btsl $_EFER_NX, %eax
17284+ leaq init_level4_pgt(%rip), %rdi
17285+#ifndef CONFIG_EFI
17286+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17287+#endif
17288+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17289+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17290 1: wrmsr /* Make changes effective */
17291
17292 /* Setup cr0 */
17293@@ -249,6 +248,7 @@ ENTRY(secondary_startup_64)
17294 * jump. In addition we need to ensure %cs is set so we make this
17295 * a far return.
17296 */
17297+ pax_set_fptr_mask
17298 movq initial_code(%rip),%rax
17299 pushq $0 # fake return address to stop unwinder
17300 pushq $__KERNEL_CS # set correct cs
17301@@ -262,16 +262,16 @@ ENTRY(secondary_startup_64)
17302 .quad x86_64_start_kernel
17303 ENTRY(initial_gs)
17304 .quad INIT_PER_CPU_VAR(irq_stack_union)
17305- __FINITDATA
17306
17307 ENTRY(stack_start)
17308 .quad init_thread_union+THREAD_SIZE-8
17309 .word 0
17310+ __FINITDATA
17311
17312 bad_address:
17313 jmp bad_address
17314
17315- .section ".init.text","ax"
17316+ __INIT
17317 #ifdef CONFIG_EARLY_PRINTK
17318 .globl early_idt_handlers
17319 early_idt_handlers:
17320@@ -316,18 +316,23 @@ ENTRY(early_idt_handler)
17321 #endif /* EARLY_PRINTK */
17322 1: hlt
17323 jmp 1b
17324+ .previous
17325
17326 #ifdef CONFIG_EARLY_PRINTK
17327+ __INITDATA
17328 early_recursion_flag:
17329 .long 0
17330+ .previous
17331
17332+ .section .rodata,"a",@progbits
17333 early_idt_msg:
17334 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17335 early_idt_ripmsg:
17336 .asciz "RIP %s\n"
17337-#endif /* CONFIG_EARLY_PRINTK */
17338 .previous
17339+#endif /* CONFIG_EARLY_PRINTK */
17340
17341+ .section .rodata,"a",@progbits
17342 #define NEXT_PAGE(name) \
17343 .balign PAGE_SIZE; \
17344 ENTRY(name)
17345@@ -350,13 +355,36 @@ NEXT_PAGE(init_level4_pgt)
17346 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17347 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17348 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17349+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17350+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
17351+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17352+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17353 .org init_level4_pgt + L4_START_KERNEL*8, 0
17354 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17355 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17356
17357+#ifdef CONFIG_PAX_PER_CPU_PGD
17358+NEXT_PAGE(cpu_pgd)
17359+ .rept NR_CPUS
17360+ .fill 512,8,0
17361+ .endr
17362+#endif
17363+
17364 NEXT_PAGE(level3_ident_pgt)
17365 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17366+#ifdef CONFIG_XEN
17367 .fill 511,8,0
17368+#else
17369+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17370+ .fill 510,8,0
17371+#endif
17372+
17373+NEXT_PAGE(level3_vmalloc_pgt)
17374+ .fill 512,8,0
17375+
17376+NEXT_PAGE(level3_vmemmap_pgt)
17377+ .fill L3_VMEMMAP_START,8,0
17378+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17379
17380 NEXT_PAGE(level3_kernel_pgt)
17381 .fill L3_START_KERNEL,8,0
17382@@ -364,20 +392,23 @@ NEXT_PAGE(level3_kernel_pgt)
17383 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17384 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17385
17386+NEXT_PAGE(level2_vmemmap_pgt)
17387+ .fill 512,8,0
17388+
17389 NEXT_PAGE(level2_fixmap_pgt)
17390- .fill 506,8,0
17391- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17392- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17393- .fill 5,8,0
17394+ .fill 507,8,0
17395+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17396+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17397+ .fill 4,8,0
17398
17399-NEXT_PAGE(level1_fixmap_pgt)
17400+NEXT_PAGE(level1_vsyscall_pgt)
17401 .fill 512,8,0
17402
17403-NEXT_PAGE(level2_ident_pgt)
17404- /* Since I easily can, map the first 1G.
17405+ /* Since I easily can, map the first 2G.
17406 * Don't set NX because code runs from these pages.
17407 */
17408- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17409+NEXT_PAGE(level2_ident_pgt)
17410+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17411
17412 NEXT_PAGE(level2_kernel_pgt)
17413 /*
17414@@ -390,33 +421,55 @@ NEXT_PAGE(level2_kernel_pgt)
17415 * If you want to increase this then increase MODULES_VADDR
17416 * too.)
17417 */
17418- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17419- KERNEL_IMAGE_SIZE/PMD_SIZE)
17420-
17421-NEXT_PAGE(level2_spare_pgt)
17422- .fill 512, 8, 0
17423+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17424
17425 #undef PMDS
17426 #undef NEXT_PAGE
17427
17428- .data
17429+ .align PAGE_SIZE
17430+ENTRY(cpu_gdt_table)
17431+ .rept NR_CPUS
17432+ .quad 0x0000000000000000 /* NULL descriptor */
17433+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17434+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17435+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17436+ .quad 0x00cffb000000ffff /* __USER32_CS */
17437+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17438+ .quad 0x00affb000000ffff /* __USER_CS */
17439+
17440+#ifdef CONFIG_PAX_KERNEXEC
17441+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17442+#else
17443+ .quad 0x0 /* unused */
17444+#endif
17445+
17446+ .quad 0,0 /* TSS */
17447+ .quad 0,0 /* LDT */
17448+ .quad 0,0,0 /* three TLS descriptors */
17449+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17450+ /* asm/segment.h:GDT_ENTRIES must match this */
17451+
17452+ /* zero the remaining page */
17453+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17454+ .endr
17455+
17456 .align 16
17457 .globl early_gdt_descr
17458 early_gdt_descr:
17459 .word GDT_ENTRIES*8-1
17460 early_gdt_descr_base:
17461- .quad INIT_PER_CPU_VAR(gdt_page)
17462+ .quad cpu_gdt_table
17463
17464 ENTRY(phys_base)
17465 /* This must match the first entry in level2_kernel_pgt */
17466 .quad 0x0000000000000000
17467
17468 #include "../../x86/xen/xen-head.S"
17469-
17470- .section .bss, "aw", @nobits
17471+
17472+ .section .rodata,"a",@progbits
17473 .align L1_CACHE_BYTES
17474 ENTRY(idt_table)
17475- .skip IDT_ENTRIES * 16
17476+ .fill 512,8,0
17477
17478 __PAGE_ALIGNED_BSS
17479 .align PAGE_SIZE
17480diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17481index 9c3bd4a..e1d9b35 100644
17482--- a/arch/x86/kernel/i386_ksyms_32.c
17483+++ b/arch/x86/kernel/i386_ksyms_32.c
17484@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17485 EXPORT_SYMBOL(cmpxchg8b_emu);
17486 #endif
17487
17488+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17489+
17490 /* Networking helper routines. */
17491 EXPORT_SYMBOL(csum_partial_copy_generic);
17492+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17493+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17494
17495 EXPORT_SYMBOL(__get_user_1);
17496 EXPORT_SYMBOL(__get_user_2);
17497@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17498
17499 EXPORT_SYMBOL(csum_partial);
17500 EXPORT_SYMBOL(empty_zero_page);
17501+
17502+#ifdef CONFIG_PAX_KERNEXEC
17503+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17504+#endif
17505diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17506index df89102..a244320 100644
17507--- a/arch/x86/kernel/i8259.c
17508+++ b/arch/x86/kernel/i8259.c
17509@@ -208,7 +208,7 @@ spurious_8259A_irq:
17510 "spurious 8259A interrupt: IRQ%d.\n", irq);
17511 spurious_irq_mask |= irqmask;
17512 }
17513- atomic_inc(&irq_err_count);
17514+ atomic_inc_unchecked(&irq_err_count);
17515 /*
17516 * Theoretically we do not have to handle this IRQ,
17517 * but in Linux this does not cause problems and is
17518diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17519index 3a54dcb..1c22348 100644
17520--- a/arch/x86/kernel/init_task.c
17521+++ b/arch/x86/kernel/init_task.c
17522@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17523 * way process stacks are handled. This is done by having a special
17524 * "init_task" linker map entry..
17525 */
17526-union thread_union init_thread_union __init_task_data =
17527- { INIT_THREAD_INFO(init_task) };
17528+union thread_union init_thread_union __init_task_data;
17529
17530 /*
17531 * Initial task structure.
17532@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17533 * section. Since TSS's are completely CPU-local, we want them
17534 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17535 */
17536-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17537-
17538+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17539+EXPORT_SYMBOL(init_tss);
17540diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17541index 99c4d30..74c84e9 100644
17542--- a/arch/x86/kernel/ioport.c
17543+++ b/arch/x86/kernel/ioport.c
17544@@ -6,6 +6,7 @@
17545 #include <linux/sched.h>
17546 #include <linux/kernel.h>
17547 #include <linux/capability.h>
17548+#include <linux/security.h>
17549 #include <linux/errno.h>
17550 #include <linux/types.h>
17551 #include <linux/ioport.h>
17552@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17553
17554 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17555 return -EINVAL;
17556+#ifdef CONFIG_GRKERNSEC_IO
17557+ if (turn_on && grsec_disable_privio) {
17558+ gr_handle_ioperm();
17559+ return -EPERM;
17560+ }
17561+#endif
17562 if (turn_on && !capable(CAP_SYS_RAWIO))
17563 return -EPERM;
17564
17565@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17566 * because the ->io_bitmap_max value must match the bitmap
17567 * contents:
17568 */
17569- tss = &per_cpu(init_tss, get_cpu());
17570+ tss = init_tss + get_cpu();
17571
17572 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17573
17574@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17575 return -EINVAL;
17576 /* Trying to gain more privileges? */
17577 if (level > old) {
17578+#ifdef CONFIG_GRKERNSEC_IO
17579+ if (grsec_disable_privio) {
17580+ gr_handle_iopl();
17581+ return -EPERM;
17582+ }
17583+#endif
17584 if (!capable(CAP_SYS_RAWIO))
17585 return -EPERM;
17586 }
17587diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17588index 04bbd52..83a07d9 100644
17589--- a/arch/x86/kernel/irq.c
17590+++ b/arch/x86/kernel/irq.c
17591@@ -15,7 +15,7 @@
17592 #include <asm/mce.h>
17593 #include <asm/hw_irq.h>
17594
17595-atomic_t irq_err_count;
17596+atomic_unchecked_t irq_err_count;
17597
17598 /* Function pointer for generic interrupt vector handling */
17599 void (*generic_interrupt_extension)(void) = NULL;
17600@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17601 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17602 seq_printf(p, " Machine check polls\n");
17603 #endif
17604- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17605+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17606 #if defined(CONFIG_X86_IO_APIC)
17607- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17608+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17609 #endif
17610 return 0;
17611 }
17612@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17613
17614 u64 arch_irq_stat(void)
17615 {
17616- u64 sum = atomic_read(&irq_err_count);
17617+ u64 sum = atomic_read_unchecked(&irq_err_count);
17618
17619 #ifdef CONFIG_X86_IO_APIC
17620- sum += atomic_read(&irq_mis_count);
17621+ sum += atomic_read_unchecked(&irq_mis_count);
17622 #endif
17623 return sum;
17624 }
17625diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17626index 7d35d0f..03f1d52 100644
17627--- a/arch/x86/kernel/irq_32.c
17628+++ b/arch/x86/kernel/irq_32.c
17629@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17630 __asm__ __volatile__("andl %%esp,%0" :
17631 "=r" (sp) : "0" (THREAD_SIZE - 1));
17632
17633- return sp < (sizeof(struct thread_info) + STACK_WARN);
17634+ return sp < STACK_WARN;
17635 }
17636
17637 static void print_stack_overflow(void)
17638@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17639 * per-CPU IRQ handling contexts (thread information and stack)
17640 */
17641 union irq_ctx {
17642- struct thread_info tinfo;
17643- u32 stack[THREAD_SIZE/sizeof(u32)];
17644-} __attribute__((aligned(PAGE_SIZE)));
17645+ unsigned long previous_esp;
17646+ u32 stack[THREAD_SIZE/sizeof(u32)];
17647+} __attribute__((aligned(THREAD_SIZE)));
17648
17649 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17650 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17651@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17652 static inline int
17653 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17654 {
17655- union irq_ctx *curctx, *irqctx;
17656+ union irq_ctx *irqctx;
17657 u32 *isp, arg1, arg2;
17658
17659- curctx = (union irq_ctx *) current_thread_info();
17660 irqctx = __get_cpu_var(hardirq_ctx);
17661
17662 /*
17663@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17664 * handler) we can't do that and just have to keep using the
17665 * current stack (which is the irq stack already after all)
17666 */
17667- if (unlikely(curctx == irqctx))
17668+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17669 return 0;
17670
17671 /* build the stack frame on the IRQ stack */
17672- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17673- irqctx->tinfo.task = curctx->tinfo.task;
17674- irqctx->tinfo.previous_esp = current_stack_pointer;
17675+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17676+ irqctx->previous_esp = current_stack_pointer;
17677
17678- /*
17679- * Copy the softirq bits in preempt_count so that the
17680- * softirq checks work in the hardirq context.
17681- */
17682- irqctx->tinfo.preempt_count =
17683- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17684- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17685+#ifdef CONFIG_PAX_MEMORY_UDEREF
17686+ __set_fs(MAKE_MM_SEG(0));
17687+#endif
17688
17689 if (unlikely(overflow))
17690 call_on_stack(print_stack_overflow, isp);
17691@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17692 : "0" (irq), "1" (desc), "2" (isp),
17693 "D" (desc->handle_irq)
17694 : "memory", "cc", "ecx");
17695+
17696+#ifdef CONFIG_PAX_MEMORY_UDEREF
17697+ __set_fs(current_thread_info()->addr_limit);
17698+#endif
17699+
17700 return 1;
17701 }
17702
17703@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17704 */
17705 void __cpuinit irq_ctx_init(int cpu)
17706 {
17707- union irq_ctx *irqctx;
17708-
17709 if (per_cpu(hardirq_ctx, cpu))
17710 return;
17711
17712- irqctx = &per_cpu(hardirq_stack, cpu);
17713- irqctx->tinfo.task = NULL;
17714- irqctx->tinfo.exec_domain = NULL;
17715- irqctx->tinfo.cpu = cpu;
17716- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17717- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17718-
17719- per_cpu(hardirq_ctx, cpu) = irqctx;
17720-
17721- irqctx = &per_cpu(softirq_stack, cpu);
17722- irqctx->tinfo.task = NULL;
17723- irqctx->tinfo.exec_domain = NULL;
17724- irqctx->tinfo.cpu = cpu;
17725- irqctx->tinfo.preempt_count = 0;
17726- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17727-
17728- per_cpu(softirq_ctx, cpu) = irqctx;
17729+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17730+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17731
17732 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17733 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17734@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17735 asmlinkage void do_softirq(void)
17736 {
17737 unsigned long flags;
17738- struct thread_info *curctx;
17739 union irq_ctx *irqctx;
17740 u32 *isp;
17741
17742@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17743 local_irq_save(flags);
17744
17745 if (local_softirq_pending()) {
17746- curctx = current_thread_info();
17747 irqctx = __get_cpu_var(softirq_ctx);
17748- irqctx->tinfo.task = curctx->task;
17749- irqctx->tinfo.previous_esp = current_stack_pointer;
17750+ irqctx->previous_esp = current_stack_pointer;
17751
17752 /* build the stack frame on the softirq stack */
17753- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17754+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17755+
17756+#ifdef CONFIG_PAX_MEMORY_UDEREF
17757+ __set_fs(MAKE_MM_SEG(0));
17758+#endif
17759
17760 call_on_stack(__do_softirq, isp);
17761+
17762+#ifdef CONFIG_PAX_MEMORY_UDEREF
17763+ __set_fs(current_thread_info()->addr_limit);
17764+#endif
17765+
17766 /*
17767 * Shouldnt happen, we returned above if in_interrupt():
17768 */
17769diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17770index 8d82a77..0baf312 100644
17771--- a/arch/x86/kernel/kgdb.c
17772+++ b/arch/x86/kernel/kgdb.c
17773@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17774
17775 /* clear the trace bit */
17776 linux_regs->flags &= ~X86_EFLAGS_TF;
17777- atomic_set(&kgdb_cpu_doing_single_step, -1);
17778+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17779
17780 /* set the trace bit if we're stepping */
17781 if (remcomInBuffer[0] == 's') {
17782 linux_regs->flags |= X86_EFLAGS_TF;
17783 kgdb_single_step = 1;
17784- atomic_set(&kgdb_cpu_doing_single_step,
17785+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17786 raw_smp_processor_id());
17787 }
17788
17789@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17790 break;
17791
17792 case DIE_DEBUG:
17793- if (atomic_read(&kgdb_cpu_doing_single_step) ==
17794+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17795 raw_smp_processor_id()) {
17796 if (user_mode(regs))
17797 return single_step_cont(regs, args);
17798@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17799 return instruction_pointer(regs);
17800 }
17801
17802-struct kgdb_arch arch_kgdb_ops = {
17803+const struct kgdb_arch arch_kgdb_ops = {
17804 /* Breakpoint instruction: */
17805 .gdb_bpt_instr = { 0xcc },
17806 .flags = KGDB_HW_BREAKPOINT,
17807diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17808index 7a67820..8d15b75 100644
17809--- a/arch/x86/kernel/kprobes.c
17810+++ b/arch/x86/kernel/kprobes.c
17811@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17812 char op;
17813 s32 raddr;
17814 } __attribute__((packed)) * jop;
17815- jop = (struct __arch_jmp_op *)from;
17816+
17817+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17818+
17819+ pax_open_kernel();
17820 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17821 jop->op = RELATIVEJUMP_INSTRUCTION;
17822+ pax_close_kernel();
17823 }
17824
17825 /*
17826@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17827 kprobe_opcode_t opcode;
17828 kprobe_opcode_t *orig_opcodes = opcodes;
17829
17830- if (search_exception_tables((unsigned long)opcodes))
17831+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17832 return 0; /* Page fault may occur on this address. */
17833
17834 retry:
17835@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17836 disp = (u8 *) p->addr + *((s32 *) insn) -
17837 (u8 *) p->ainsn.insn;
17838 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17839+ pax_open_kernel();
17840 *(s32 *)insn = (s32) disp;
17841+ pax_close_kernel();
17842 }
17843 }
17844 #endif
17845@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17846
17847 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17848 {
17849- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17850+ pax_open_kernel();
17851+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17852+ pax_close_kernel();
17853
17854 fix_riprel(p);
17855
17856- if (can_boost(p->addr))
17857+ if (can_boost(ktla_ktva(p->addr)))
17858 p->ainsn.boostable = 0;
17859 else
17860 p->ainsn.boostable = -1;
17861
17862- p->opcode = *p->addr;
17863+ p->opcode = *(ktla_ktva(p->addr));
17864 }
17865
17866 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17867@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17868 if (p->opcode == BREAKPOINT_INSTRUCTION)
17869 regs->ip = (unsigned long)p->addr;
17870 else
17871- regs->ip = (unsigned long)p->ainsn.insn;
17872+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17873 }
17874
17875 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17876@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17877 if (p->ainsn.boostable == 1 && !p->post_handler) {
17878 /* Boost up -- we can execute copied instructions directly */
17879 reset_current_kprobe();
17880- regs->ip = (unsigned long)p->ainsn.insn;
17881+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17882 preempt_enable_no_resched();
17883 return;
17884 }
17885@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17886 struct kprobe_ctlblk *kcb;
17887
17888 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17889- if (*addr != BREAKPOINT_INSTRUCTION) {
17890+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17891 /*
17892 * The breakpoint instruction was removed right
17893 * after we hit it. Another cpu has removed
17894@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17895 /* Skip orig_ax, ip, cs */
17896 " addq $24, %rsp\n"
17897 " popfq\n"
17898+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17899+ " btsq $63,(%rsp)\n"
17900+#endif
17901 #else
17902 " pushf\n"
17903 /*
17904@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17905 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17906 {
17907 unsigned long *tos = stack_addr(regs);
17908- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17909+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17910 unsigned long orig_ip = (unsigned long)p->addr;
17911 kprobe_opcode_t *insn = p->ainsn.insn;
17912
17913@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17914 struct die_args *args = data;
17915 int ret = NOTIFY_DONE;
17916
17917- if (args->regs && user_mode_vm(args->regs))
17918+ if (args->regs && user_mode(args->regs))
17919 return ret;
17920
17921 switch (val) {
17922diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
17923index 63b0ec8..6d92227 100644
17924--- a/arch/x86/kernel/kvm.c
17925+++ b/arch/x86/kernel/kvm.c
17926@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
17927 pv_mmu_ops.set_pud = kvm_set_pud;
17928 #if PAGETABLE_LEVELS == 4
17929 pv_mmu_ops.set_pgd = kvm_set_pgd;
17930+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
17931 #endif
17932 #endif
17933 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
17934diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17935index ec6ef60..ab2c824 100644
17936--- a/arch/x86/kernel/ldt.c
17937+++ b/arch/x86/kernel/ldt.c
17938@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17939 if (reload) {
17940 #ifdef CONFIG_SMP
17941 preempt_disable();
17942- load_LDT(pc);
17943+ load_LDT_nolock(pc);
17944 if (!cpumask_equal(mm_cpumask(current->mm),
17945 cpumask_of(smp_processor_id())))
17946 smp_call_function(flush_ldt, current->mm, 1);
17947 preempt_enable();
17948 #else
17949- load_LDT(pc);
17950+ load_LDT_nolock(pc);
17951 #endif
17952 }
17953 if (oldsize) {
17954@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17955 return err;
17956
17957 for (i = 0; i < old->size; i++)
17958- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17959+ write_ldt_entry(new->ldt, i, old->ldt + i);
17960 return 0;
17961 }
17962
17963@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17964 retval = copy_ldt(&mm->context, &old_mm->context);
17965 mutex_unlock(&old_mm->context.lock);
17966 }
17967+
17968+ if (tsk == current) {
17969+ mm->context.vdso = 0;
17970+
17971+#ifdef CONFIG_X86_32
17972+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17973+ mm->context.user_cs_base = 0UL;
17974+ mm->context.user_cs_limit = ~0UL;
17975+
17976+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17977+ cpus_clear(mm->context.cpu_user_cs_mask);
17978+#endif
17979+
17980+#endif
17981+#endif
17982+
17983+ }
17984+
17985 return retval;
17986 }
17987
17988@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17989 }
17990 }
17991
17992+#ifdef CONFIG_PAX_SEGMEXEC
17993+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17994+ error = -EINVAL;
17995+ goto out_unlock;
17996+ }
17997+#endif
17998+
17999 fill_ldt(&ldt, &ldt_info);
18000 if (oldmode)
18001 ldt.avl = 0;
18002diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18003index c1c429d..f02eaf9 100644
18004--- a/arch/x86/kernel/machine_kexec_32.c
18005+++ b/arch/x86/kernel/machine_kexec_32.c
18006@@ -26,7 +26,7 @@
18007 #include <asm/system.h>
18008 #include <asm/cacheflush.h>
18009
18010-static void set_idt(void *newidt, __u16 limit)
18011+static void set_idt(struct desc_struct *newidt, __u16 limit)
18012 {
18013 struct desc_ptr curidt;
18014
18015@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18016 }
18017
18018
18019-static void set_gdt(void *newgdt, __u16 limit)
18020+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18021 {
18022 struct desc_ptr curgdt;
18023
18024@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18025 }
18026
18027 control_page = page_address(image->control_code_page);
18028- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18029+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18030
18031 relocate_kernel_ptr = control_page;
18032 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18033diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18034index 1e47679..e73449d 100644
18035--- a/arch/x86/kernel/microcode_amd.c
18036+++ b/arch/x86/kernel/microcode_amd.c
18037@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18038 uci->mc = NULL;
18039 }
18040
18041-static struct microcode_ops microcode_amd_ops = {
18042+static const struct microcode_ops microcode_amd_ops = {
18043 .request_microcode_user = request_microcode_user,
18044 .request_microcode_fw = request_microcode_fw,
18045 .collect_cpu_info = collect_cpu_info_amd,
18046@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18047 .microcode_fini_cpu = microcode_fini_cpu_amd,
18048 };
18049
18050-struct microcode_ops * __init init_amd_microcode(void)
18051+const struct microcode_ops * __init init_amd_microcode(void)
18052 {
18053 return &microcode_amd_ops;
18054 }
18055diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18056index 378e9a8..b5a6ea9 100644
18057--- a/arch/x86/kernel/microcode_core.c
18058+++ b/arch/x86/kernel/microcode_core.c
18059@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18060
18061 #define MICROCODE_VERSION "2.00"
18062
18063-static struct microcode_ops *microcode_ops;
18064+static const struct microcode_ops *microcode_ops;
18065
18066 /*
18067 * Synchronization.
18068diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18069index 0d334dd..14cedaf 100644
18070--- a/arch/x86/kernel/microcode_intel.c
18071+++ b/arch/x86/kernel/microcode_intel.c
18072@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18073
18074 static int get_ucode_user(void *to, const void *from, size_t n)
18075 {
18076- return copy_from_user(to, from, n);
18077+ return copy_from_user(to, (const void __force_user *)from, n);
18078 }
18079
18080 static enum ucode_state
18081 request_microcode_user(int cpu, const void __user *buf, size_t size)
18082 {
18083- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18084+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18085 }
18086
18087 static void microcode_fini_cpu(int cpu)
18088@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18089 uci->mc = NULL;
18090 }
18091
18092-static struct microcode_ops microcode_intel_ops = {
18093+static const struct microcode_ops microcode_intel_ops = {
18094 .request_microcode_user = request_microcode_user,
18095 .request_microcode_fw = request_microcode_fw,
18096 .collect_cpu_info = collect_cpu_info,
18097@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18098 .microcode_fini_cpu = microcode_fini_cpu,
18099 };
18100
18101-struct microcode_ops * __init init_intel_microcode(void)
18102+const struct microcode_ops * __init init_intel_microcode(void)
18103 {
18104 return &microcode_intel_ops;
18105 }
18106diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18107index 89f386f..9028f51 100644
18108--- a/arch/x86/kernel/module.c
18109+++ b/arch/x86/kernel/module.c
18110@@ -34,7 +34,7 @@
18111 #define DEBUGP(fmt...)
18112 #endif
18113
18114-void *module_alloc(unsigned long size)
18115+static void *__module_alloc(unsigned long size, pgprot_t prot)
18116 {
18117 struct vm_struct *area;
18118
18119@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18120 if (!area)
18121 return NULL;
18122
18123- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18124- PAGE_KERNEL_EXEC);
18125+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18126+}
18127+
18128+void *module_alloc(unsigned long size)
18129+{
18130+
18131+#ifdef CONFIG_PAX_KERNEXEC
18132+ return __module_alloc(size, PAGE_KERNEL);
18133+#else
18134+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18135+#endif
18136+
18137 }
18138
18139 /* Free memory returned from module_alloc */
18140@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18141 vfree(module_region);
18142 }
18143
18144+#ifdef CONFIG_PAX_KERNEXEC
18145+#ifdef CONFIG_X86_32
18146+void *module_alloc_exec(unsigned long size)
18147+{
18148+ struct vm_struct *area;
18149+
18150+ if (size == 0)
18151+ return NULL;
18152+
18153+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18154+ return area ? area->addr : NULL;
18155+}
18156+EXPORT_SYMBOL(module_alloc_exec);
18157+
18158+void module_free_exec(struct module *mod, void *module_region)
18159+{
18160+ vunmap(module_region);
18161+}
18162+EXPORT_SYMBOL(module_free_exec);
18163+#else
18164+void module_free_exec(struct module *mod, void *module_region)
18165+{
18166+ module_free(mod, module_region);
18167+}
18168+EXPORT_SYMBOL(module_free_exec);
18169+
18170+void *module_alloc_exec(unsigned long size)
18171+{
18172+ return __module_alloc(size, PAGE_KERNEL_RX);
18173+}
18174+EXPORT_SYMBOL(module_alloc_exec);
18175+#endif
18176+#endif
18177+
18178 /* We don't need anything special. */
18179 int module_frob_arch_sections(Elf_Ehdr *hdr,
18180 Elf_Shdr *sechdrs,
18181@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18182 unsigned int i;
18183 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18184 Elf32_Sym *sym;
18185- uint32_t *location;
18186+ uint32_t *plocation, location;
18187
18188 DEBUGP("Applying relocate section %u to %u\n", relsec,
18189 sechdrs[relsec].sh_info);
18190 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18191 /* This is where to make the change */
18192- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18193- + rel[i].r_offset;
18194+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18195+ location = (uint32_t)plocation;
18196+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18197+ plocation = ktla_ktva((void *)plocation);
18198 /* This is the symbol it is referring to. Note that all
18199 undefined symbols have been resolved. */
18200 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18201@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18202 switch (ELF32_R_TYPE(rel[i].r_info)) {
18203 case R_386_32:
18204 /* We add the value into the location given */
18205- *location += sym->st_value;
18206+ pax_open_kernel();
18207+ *plocation += sym->st_value;
18208+ pax_close_kernel();
18209 break;
18210 case R_386_PC32:
18211 /* Add the value, subtract its postition */
18212- *location += sym->st_value - (uint32_t)location;
18213+ pax_open_kernel();
18214+ *plocation += sym->st_value - location;
18215+ pax_close_kernel();
18216 break;
18217 default:
18218 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18219@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18220 case R_X86_64_NONE:
18221 break;
18222 case R_X86_64_64:
18223+ pax_open_kernel();
18224 *(u64 *)loc = val;
18225+ pax_close_kernel();
18226 break;
18227 case R_X86_64_32:
18228+ pax_open_kernel();
18229 *(u32 *)loc = val;
18230+ pax_close_kernel();
18231 if (val != *(u32 *)loc)
18232 goto overflow;
18233 break;
18234 case R_X86_64_32S:
18235+ pax_open_kernel();
18236 *(s32 *)loc = val;
18237+ pax_close_kernel();
18238 if ((s64)val != *(s32 *)loc)
18239 goto overflow;
18240 break;
18241 case R_X86_64_PC32:
18242 val -= (u64)loc;
18243+ pax_open_kernel();
18244 *(u32 *)loc = val;
18245+ pax_close_kernel();
18246+
18247 #if 0
18248 if ((s64)val != *(s32 *)loc)
18249 goto overflow;
18250diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18251index 3a7c5a4..9191528 100644
18252--- a/arch/x86/kernel/paravirt-spinlocks.c
18253+++ b/arch/x86/kernel/paravirt-spinlocks.c
18254@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18255 __raw_spin_lock(lock);
18256 }
18257
18258-struct pv_lock_ops pv_lock_ops = {
18259+struct pv_lock_ops pv_lock_ops __read_only = {
18260 #ifdef CONFIG_SMP
18261 .spin_is_locked = __ticket_spin_is_locked,
18262 .spin_is_contended = __ticket_spin_is_contended,
18263diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18264index 1b1739d..dea6077 100644
18265--- a/arch/x86/kernel/paravirt.c
18266+++ b/arch/x86/kernel/paravirt.c
18267@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18268 {
18269 return x;
18270 }
18271+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18272+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18273+#endif
18274
18275 void __init default_banner(void)
18276 {
18277@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18278 * corresponding structure. */
18279 static void *get_call_destination(u8 type)
18280 {
18281- struct paravirt_patch_template tmpl = {
18282+ const struct paravirt_patch_template tmpl = {
18283 .pv_init_ops = pv_init_ops,
18284 .pv_time_ops = pv_time_ops,
18285 .pv_cpu_ops = pv_cpu_ops,
18286@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18287 .pv_lock_ops = pv_lock_ops,
18288 #endif
18289 };
18290+
18291+ pax_track_stack();
18292 return *((void **)&tmpl + type);
18293 }
18294
18295@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18296 if (opfunc == NULL)
18297 /* If there's no function, patch it with a ud2a (BUG) */
18298 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18299- else if (opfunc == _paravirt_nop)
18300+ else if (opfunc == (void *)_paravirt_nop)
18301 /* If the operation is a nop, then nop the callsite */
18302 ret = paravirt_patch_nop();
18303
18304 /* identity functions just return their single argument */
18305- else if (opfunc == _paravirt_ident_32)
18306+ else if (opfunc == (void *)_paravirt_ident_32)
18307 ret = paravirt_patch_ident_32(insnbuf, len);
18308- else if (opfunc == _paravirt_ident_64)
18309+ else if (opfunc == (void *)_paravirt_ident_64)
18310+ ret = paravirt_patch_ident_64(insnbuf, len);
18311+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18312+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18313 ret = paravirt_patch_ident_64(insnbuf, len);
18314+#endif
18315
18316 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18317 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18318@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18319 if (insn_len > len || start == NULL)
18320 insn_len = len;
18321 else
18322- memcpy(insnbuf, start, insn_len);
18323+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18324
18325 return insn_len;
18326 }
18327@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18328 preempt_enable();
18329 }
18330
18331-struct pv_info pv_info = {
18332+struct pv_info pv_info __read_only = {
18333 .name = "bare hardware",
18334 .paravirt_enabled = 0,
18335 .kernel_rpl = 0,
18336 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18337 };
18338
18339-struct pv_init_ops pv_init_ops = {
18340+struct pv_init_ops pv_init_ops __read_only = {
18341 .patch = native_patch,
18342 };
18343
18344-struct pv_time_ops pv_time_ops = {
18345+struct pv_time_ops pv_time_ops __read_only = {
18346 .sched_clock = native_sched_clock,
18347 };
18348
18349-struct pv_irq_ops pv_irq_ops = {
18350+struct pv_irq_ops pv_irq_ops __read_only = {
18351 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18352 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18353 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18354@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18355 #endif
18356 };
18357
18358-struct pv_cpu_ops pv_cpu_ops = {
18359+struct pv_cpu_ops pv_cpu_ops __read_only = {
18360 .cpuid = native_cpuid,
18361 .get_debugreg = native_get_debugreg,
18362 .set_debugreg = native_set_debugreg,
18363@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18364 .end_context_switch = paravirt_nop,
18365 };
18366
18367-struct pv_apic_ops pv_apic_ops = {
18368+struct pv_apic_ops pv_apic_ops __read_only = {
18369 #ifdef CONFIG_X86_LOCAL_APIC
18370 .startup_ipi_hook = paravirt_nop,
18371 #endif
18372 };
18373
18374-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18375+#ifdef CONFIG_X86_32
18376+#ifdef CONFIG_X86_PAE
18377+/* 64-bit pagetable entries */
18378+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18379+#else
18380 /* 32-bit pagetable entries */
18381 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18382+#endif
18383 #else
18384 /* 64-bit pagetable entries */
18385 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18386 #endif
18387
18388-struct pv_mmu_ops pv_mmu_ops = {
18389+struct pv_mmu_ops pv_mmu_ops __read_only = {
18390
18391 .read_cr2 = native_read_cr2,
18392 .write_cr2 = native_write_cr2,
18393@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18394 .make_pud = PTE_IDENT,
18395
18396 .set_pgd = native_set_pgd,
18397+ .set_pgd_batched = native_set_pgd_batched,
18398 #endif
18399 #endif /* PAGETABLE_LEVELS >= 3 */
18400
18401@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18402 },
18403
18404 .set_fixmap = native_set_fixmap,
18405+
18406+#ifdef CONFIG_PAX_KERNEXEC
18407+ .pax_open_kernel = native_pax_open_kernel,
18408+ .pax_close_kernel = native_pax_close_kernel,
18409+#endif
18410+
18411 };
18412
18413 EXPORT_SYMBOL_GPL(pv_time_ops);
18414diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18415index 1a2d4b1..6a0dd55 100644
18416--- a/arch/x86/kernel/pci-calgary_64.c
18417+++ b/arch/x86/kernel/pci-calgary_64.c
18418@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18419 free_pages((unsigned long)vaddr, get_order(size));
18420 }
18421
18422-static struct dma_map_ops calgary_dma_ops = {
18423+static const struct dma_map_ops calgary_dma_ops = {
18424 .alloc_coherent = calgary_alloc_coherent,
18425 .free_coherent = calgary_free_coherent,
18426 .map_sg = calgary_map_sg,
18427diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18428index 6ac3931..42b4414 100644
18429--- a/arch/x86/kernel/pci-dma.c
18430+++ b/arch/x86/kernel/pci-dma.c
18431@@ -14,7 +14,7 @@
18432
18433 static int forbid_dac __read_mostly;
18434
18435-struct dma_map_ops *dma_ops;
18436+const struct dma_map_ops *dma_ops;
18437 EXPORT_SYMBOL(dma_ops);
18438
18439 static int iommu_sac_force __read_mostly;
18440@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18441
18442 int dma_supported(struct device *dev, u64 mask)
18443 {
18444- struct dma_map_ops *ops = get_dma_ops(dev);
18445+ const struct dma_map_ops *ops = get_dma_ops(dev);
18446
18447 #ifdef CONFIG_PCI
18448 if (mask > 0xffffffff && forbid_dac > 0) {
18449diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18450index 1c76691..e3632db 100644
18451--- a/arch/x86/kernel/pci-gart_64.c
18452+++ b/arch/x86/kernel/pci-gart_64.c
18453@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18454 return -1;
18455 }
18456
18457-static struct dma_map_ops gart_dma_ops = {
18458+static const struct dma_map_ops gart_dma_ops = {
18459 .map_sg = gart_map_sg,
18460 .unmap_sg = gart_unmap_sg,
18461 .map_page = gart_map_page,
18462diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18463index a3933d4..c898869 100644
18464--- a/arch/x86/kernel/pci-nommu.c
18465+++ b/arch/x86/kernel/pci-nommu.c
18466@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18467 flush_write_buffers();
18468 }
18469
18470-struct dma_map_ops nommu_dma_ops = {
18471+const struct dma_map_ops nommu_dma_ops = {
18472 .alloc_coherent = dma_generic_alloc_coherent,
18473 .free_coherent = nommu_free_coherent,
18474 .map_sg = nommu_map_sg,
18475diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18476index aaa6b78..4de1881 100644
18477--- a/arch/x86/kernel/pci-swiotlb.c
18478+++ b/arch/x86/kernel/pci-swiotlb.c
18479@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18480 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18481 }
18482
18483-static struct dma_map_ops swiotlb_dma_ops = {
18484+static const struct dma_map_ops swiotlb_dma_ops = {
18485 .mapping_error = swiotlb_dma_mapping_error,
18486 .alloc_coherent = x86_swiotlb_alloc_coherent,
18487 .free_coherent = swiotlb_free_coherent,
18488diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18489index fc6c84d..0312ca2 100644
18490--- a/arch/x86/kernel/process.c
18491+++ b/arch/x86/kernel/process.c
18492@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18493
18494 void free_thread_info(struct thread_info *ti)
18495 {
18496- free_thread_xstate(ti->task);
18497 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18498 }
18499
18500+static struct kmem_cache *task_struct_cachep;
18501+
18502 void arch_task_cache_init(void)
18503 {
18504- task_xstate_cachep =
18505- kmem_cache_create("task_xstate", xstate_size,
18506+ /* create a slab on which task_structs can be allocated */
18507+ task_struct_cachep =
18508+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18509+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18510+
18511+ task_xstate_cachep =
18512+ kmem_cache_create("task_xstate", xstate_size,
18513 __alignof__(union thread_xstate),
18514- SLAB_PANIC | SLAB_NOTRACK, NULL);
18515+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18516+}
18517+
18518+struct task_struct *alloc_task_struct(void)
18519+{
18520+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18521+}
18522+
18523+void free_task_struct(struct task_struct *task)
18524+{
18525+ free_thread_xstate(task);
18526+ kmem_cache_free(task_struct_cachep, task);
18527 }
18528
18529 /*
18530@@ -73,7 +90,7 @@ void exit_thread(void)
18531 unsigned long *bp = t->io_bitmap_ptr;
18532
18533 if (bp) {
18534- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18535+ struct tss_struct *tss = init_tss + get_cpu();
18536
18537 t->io_bitmap_ptr = NULL;
18538 clear_thread_flag(TIF_IO_BITMAP);
18539@@ -93,6 +110,9 @@ void flush_thread(void)
18540
18541 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18542
18543+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18544+ loadsegment(gs, 0);
18545+#endif
18546 tsk->thread.debugreg0 = 0;
18547 tsk->thread.debugreg1 = 0;
18548 tsk->thread.debugreg2 = 0;
18549@@ -307,7 +327,7 @@ void default_idle(void)
18550 EXPORT_SYMBOL(default_idle);
18551 #endif
18552
18553-void stop_this_cpu(void *dummy)
18554+__noreturn void stop_this_cpu(void *dummy)
18555 {
18556 local_irq_disable();
18557 /*
18558@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18559 }
18560 early_param("idle", idle_setup);
18561
18562-unsigned long arch_align_stack(unsigned long sp)
18563+#ifdef CONFIG_PAX_RANDKSTACK
18564+void pax_randomize_kstack(struct pt_regs *regs)
18565 {
18566- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18567- sp -= get_random_int() % 8192;
18568- return sp & ~0xf;
18569-}
18570+ struct thread_struct *thread = &current->thread;
18571+ unsigned long time;
18572
18573-unsigned long arch_randomize_brk(struct mm_struct *mm)
18574-{
18575- unsigned long range_end = mm->brk + 0x02000000;
18576- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18577+ if (!randomize_va_space)
18578+ return;
18579+
18580+ if (v8086_mode(regs))
18581+ return;
18582+
18583+ rdtscl(time);
18584+
18585+ /* P4 seems to return a 0 LSB, ignore it */
18586+#ifdef CONFIG_MPENTIUM4
18587+ time &= 0x3EUL;
18588+ time <<= 2;
18589+#elif defined(CONFIG_X86_64)
18590+ time &= 0xFUL;
18591+ time <<= 4;
18592+#else
18593+ time &= 0x1FUL;
18594+ time <<= 3;
18595+#endif
18596+
18597+ thread->sp0 ^= time;
18598+ load_sp0(init_tss + smp_processor_id(), thread);
18599+
18600+#ifdef CONFIG_X86_64
18601+ percpu_write(kernel_stack, thread->sp0);
18602+#endif
18603 }
18604+#endif
18605
18606diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18607index c40c432..6e1df72 100644
18608--- a/arch/x86/kernel/process_32.c
18609+++ b/arch/x86/kernel/process_32.c
18610@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18611 unsigned long thread_saved_pc(struct task_struct *tsk)
18612 {
18613 return ((unsigned long *)tsk->thread.sp)[3];
18614+//XXX return tsk->thread.eip;
18615 }
18616
18617 #ifndef CONFIG_SMP
18618@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18619 unsigned short ss, gs;
18620 const char *board;
18621
18622- if (user_mode_vm(regs)) {
18623+ if (user_mode(regs)) {
18624 sp = regs->sp;
18625 ss = regs->ss & 0xffff;
18626- gs = get_user_gs(regs);
18627 } else {
18628 sp = (unsigned long) (&regs->sp);
18629 savesegment(ss, ss);
18630- savesegment(gs, gs);
18631 }
18632+ gs = get_user_gs(regs);
18633
18634 printk("\n");
18635
18636@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18637 regs.bx = (unsigned long) fn;
18638 regs.dx = (unsigned long) arg;
18639
18640- regs.ds = __USER_DS;
18641- regs.es = __USER_DS;
18642+ regs.ds = __KERNEL_DS;
18643+ regs.es = __KERNEL_DS;
18644 regs.fs = __KERNEL_PERCPU;
18645- regs.gs = __KERNEL_STACK_CANARY;
18646+ savesegment(gs, regs.gs);
18647 regs.orig_ax = -1;
18648 regs.ip = (unsigned long) kernel_thread_helper;
18649 regs.cs = __KERNEL_CS | get_kernel_rpl();
18650@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18651 struct task_struct *tsk;
18652 int err;
18653
18654- childregs = task_pt_regs(p);
18655+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18656 *childregs = *regs;
18657 childregs->ax = 0;
18658 childregs->sp = sp;
18659
18660 p->thread.sp = (unsigned long) childregs;
18661 p->thread.sp0 = (unsigned long) (childregs+1);
18662+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18663
18664 p->thread.ip = (unsigned long) ret_from_fork;
18665
18666@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18667 struct thread_struct *prev = &prev_p->thread,
18668 *next = &next_p->thread;
18669 int cpu = smp_processor_id();
18670- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18671+ struct tss_struct *tss = init_tss + cpu;
18672 bool preload_fpu;
18673
18674 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18675@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18676 */
18677 lazy_save_gs(prev->gs);
18678
18679+#ifdef CONFIG_PAX_MEMORY_UDEREF
18680+ __set_fs(task_thread_info(next_p)->addr_limit);
18681+#endif
18682+
18683 /*
18684 * Load the per-thread Thread-Local Storage descriptor.
18685 */
18686@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18687 */
18688 arch_end_context_switch(next_p);
18689
18690+ percpu_write(current_task, next_p);
18691+ percpu_write(current_tinfo, &next_p->tinfo);
18692+
18693 if (preload_fpu)
18694 __math_state_restore();
18695
18696@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18697 if (prev->gs | next->gs)
18698 lazy_load_gs(next->gs);
18699
18700- percpu_write(current_task, next_p);
18701-
18702 return prev_p;
18703 }
18704
18705@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18706 } while (count++ < 16);
18707 return 0;
18708 }
18709-
18710diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18711index 39493bc..196816d 100644
18712--- a/arch/x86/kernel/process_64.c
18713+++ b/arch/x86/kernel/process_64.c
18714@@ -91,7 +91,7 @@ static void __exit_idle(void)
18715 void exit_idle(void)
18716 {
18717 /* idle loop has pid 0 */
18718- if (current->pid)
18719+ if (task_pid_nr(current))
18720 return;
18721 __exit_idle();
18722 }
18723@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18724 if (!board)
18725 board = "";
18726 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18727- current->pid, current->comm, print_tainted(),
18728+ task_pid_nr(current), current->comm, print_tainted(),
18729 init_utsname()->release,
18730 (int)strcspn(init_utsname()->version, " "),
18731 init_utsname()->version, board);
18732@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18733 struct pt_regs *childregs;
18734 struct task_struct *me = current;
18735
18736- childregs = ((struct pt_regs *)
18737- (THREAD_SIZE + task_stack_page(p))) - 1;
18738+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18739 *childregs = *regs;
18740
18741 childregs->ax = 0;
18742@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18743 p->thread.sp = (unsigned long) childregs;
18744 p->thread.sp0 = (unsigned long) (childregs+1);
18745 p->thread.usersp = me->thread.usersp;
18746+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18747
18748 set_tsk_thread_flag(p, TIF_FORK);
18749
18750@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18751 struct thread_struct *prev = &prev_p->thread;
18752 struct thread_struct *next = &next_p->thread;
18753 int cpu = smp_processor_id();
18754- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18755+ struct tss_struct *tss = init_tss + cpu;
18756 unsigned fsindex, gsindex;
18757 bool preload_fpu;
18758
18759@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18760 prev->usersp = percpu_read(old_rsp);
18761 percpu_write(old_rsp, next->usersp);
18762 percpu_write(current_task, next_p);
18763+ percpu_write(current_tinfo, &next_p->tinfo);
18764
18765- percpu_write(kernel_stack,
18766- (unsigned long)task_stack_page(next_p) +
18767- THREAD_SIZE - KERNEL_STACK_OFFSET);
18768+ percpu_write(kernel_stack, next->sp0);
18769
18770 /*
18771 * Now maybe reload the debug registers and handle I/O bitmaps
18772@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18773 if (!p || p == current || p->state == TASK_RUNNING)
18774 return 0;
18775 stack = (unsigned long)task_stack_page(p);
18776- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18777+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18778 return 0;
18779 fp = *(u64 *)(p->thread.sp);
18780 do {
18781- if (fp < (unsigned long)stack ||
18782- fp >= (unsigned long)stack+THREAD_SIZE)
18783+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18784 return 0;
18785 ip = *(u64 *)(fp+8);
18786 if (!in_sched_functions(ip))
18787diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18788index c06acdd..3f5fff5 100644
18789--- a/arch/x86/kernel/ptrace.c
18790+++ b/arch/x86/kernel/ptrace.c
18791@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18792 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18793 {
18794 int ret;
18795- unsigned long __user *datap = (unsigned long __user *)data;
18796+ unsigned long __user *datap = (__force unsigned long __user *)data;
18797
18798 switch (request) {
18799 /* read the word at location addr in the USER area. */
18800@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18801 if (addr < 0)
18802 return -EIO;
18803 ret = do_get_thread_area(child, addr,
18804- (struct user_desc __user *) data);
18805+ (__force struct user_desc __user *) data);
18806 break;
18807
18808 case PTRACE_SET_THREAD_AREA:
18809 if (addr < 0)
18810 return -EIO;
18811 ret = do_set_thread_area(child, addr,
18812- (struct user_desc __user *) data, 0);
18813+ (__force struct user_desc __user *) data, 0);
18814 break;
18815 #endif
18816
18817@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18818 #ifdef CONFIG_X86_PTRACE_BTS
18819 case PTRACE_BTS_CONFIG:
18820 ret = ptrace_bts_config
18821- (child, data, (struct ptrace_bts_config __user *)addr);
18822+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18823 break;
18824
18825 case PTRACE_BTS_STATUS:
18826 ret = ptrace_bts_status
18827- (child, data, (struct ptrace_bts_config __user *)addr);
18828+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18829 break;
18830
18831 case PTRACE_BTS_SIZE:
18832@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18833
18834 case PTRACE_BTS_GET:
18835 ret = ptrace_bts_read_record
18836- (child, data, (struct bts_struct __user *) addr);
18837+ (child, data, (__force struct bts_struct __user *) addr);
18838 break;
18839
18840 case PTRACE_BTS_CLEAR:
18841@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18842
18843 case PTRACE_BTS_DRAIN:
18844 ret = ptrace_bts_drain
18845- (child, data, (struct bts_struct __user *) addr);
18846+ (child, data, (__force struct bts_struct __user *) addr);
18847 break;
18848 #endif /* CONFIG_X86_PTRACE_BTS */
18849
18850@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18851 info.si_code = si_code;
18852
18853 /* User-mode ip? */
18854- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18855+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18856
18857 /* Send us the fake SIGTRAP */
18858 force_sig_info(SIGTRAP, &info, tsk);
18859@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18860 * We must return the syscall number to actually look up in the table.
18861 * This can be -1L to skip running any syscall at all.
18862 */
18863-asmregparm long syscall_trace_enter(struct pt_regs *regs)
18864+long syscall_trace_enter(struct pt_regs *regs)
18865 {
18866 long ret = 0;
18867
18868@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18869 return ret ?: regs->orig_ax;
18870 }
18871
18872-asmregparm void syscall_trace_leave(struct pt_regs *regs)
18873+void syscall_trace_leave(struct pt_regs *regs)
18874 {
18875 if (unlikely(current->audit_context))
18876 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18877diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18878index cf98100..e76e03d 100644
18879--- a/arch/x86/kernel/reboot.c
18880+++ b/arch/x86/kernel/reboot.c
18881@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18882 EXPORT_SYMBOL(pm_power_off);
18883
18884 static const struct desc_ptr no_idt = {};
18885-static int reboot_mode;
18886+static unsigned short reboot_mode;
18887 enum reboot_type reboot_type = BOOT_KBD;
18888 int reboot_force;
18889
18890@@ -292,12 +292,12 @@ core_initcall(reboot_init);
18891 controller to pulse the CPU reset line, which is more thorough, but
18892 doesn't work with at least one type of 486 motherboard. It is easy
18893 to stop this code working; hence the copious comments. */
18894-static const unsigned long long
18895-real_mode_gdt_entries [3] =
18896+static struct desc_struct
18897+real_mode_gdt_entries [3] __read_only =
18898 {
18899- 0x0000000000000000ULL, /* Null descriptor */
18900- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18901- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18902+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18903+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
18904+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
18905 };
18906
18907 static const struct desc_ptr
18908@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
18909 * specified by the code and length parameters.
18910 * We assume that length will aways be less that 100!
18911 */
18912-void machine_real_restart(const unsigned char *code, int length)
18913+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
18914 {
18915 local_irq_disable();
18916
18917@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
18918 /* Remap the kernel at virtual address zero, as well as offset zero
18919 from the kernel segment. This assumes the kernel segment starts at
18920 virtual address PAGE_OFFSET. */
18921- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18922- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
18923+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18924+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
18925
18926 /*
18927 * Use `swapper_pg_dir' as our page directory.
18928@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
18929 boot)". This seems like a fairly standard thing that gets set by
18930 REBOOT.COM programs, and the previous reset routine did this
18931 too. */
18932- *((unsigned short *)0x472) = reboot_mode;
18933+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18934
18935 /* For the switch to real mode, copy some code to low memory. It has
18936 to be in the first 64k because it is running in 16-bit mode, and it
18937 has to have the same physical and virtual address, because it turns
18938 off paging. Copy it near the end of the first page, out of the way
18939 of BIOS variables. */
18940- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
18941- real_mode_switch, sizeof (real_mode_switch));
18942- memcpy((void *)(0x1000 - 100), code, length);
18943+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
18944+ memcpy(__va(0x1000 - 100), code, length);
18945
18946 /* Set up the IDT for real mode. */
18947 load_idt(&real_mode_idt);
18948@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
18949 __asm__ __volatile__ ("ljmp $0x0008,%0"
18950 :
18951 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
18952+ do { } while (1);
18953 }
18954 #ifdef CONFIG_APM_MODULE
18955 EXPORT_SYMBOL(machine_real_restart);
18956@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18957 {
18958 }
18959
18960-static void native_machine_emergency_restart(void)
18961+__noreturn static void native_machine_emergency_restart(void)
18962 {
18963 int i;
18964
18965@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
18966 #endif
18967 }
18968
18969-static void __machine_emergency_restart(int emergency)
18970+static __noreturn void __machine_emergency_restart(int emergency)
18971 {
18972 reboot_emergency = emergency;
18973 machine_ops.emergency_restart();
18974 }
18975
18976-static void native_machine_restart(char *__unused)
18977+static __noreturn void native_machine_restart(char *__unused)
18978 {
18979 printk("machine restart\n");
18980
18981@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
18982 __machine_emergency_restart(0);
18983 }
18984
18985-static void native_machine_halt(void)
18986+static __noreturn void native_machine_halt(void)
18987 {
18988 /* stop other cpus and apics */
18989 machine_shutdown();
18990@@ -685,7 +685,7 @@ static void native_machine_halt(void)
18991 stop_this_cpu(NULL);
18992 }
18993
18994-static void native_machine_power_off(void)
18995+__noreturn static void native_machine_power_off(void)
18996 {
18997 if (pm_power_off) {
18998 if (!reboot_force)
18999@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19000 }
19001 /* a fallback in case there is no PM info available */
19002 tboot_shutdown(TB_SHUTDOWN_HALT);
19003+ do { } while (1);
19004 }
19005
19006 struct machine_ops machine_ops = {
19007diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19008index 7a6f3b3..976a959 100644
19009--- a/arch/x86/kernel/relocate_kernel_64.S
19010+++ b/arch/x86/kernel/relocate_kernel_64.S
19011@@ -11,6 +11,7 @@
19012 #include <asm/kexec.h>
19013 #include <asm/processor-flags.h>
19014 #include <asm/pgtable_types.h>
19015+#include <asm/alternative-asm.h>
19016
19017 /*
19018 * Must be relocatable PIC code callable as a C function
19019@@ -167,6 +168,7 @@ identity_mapped:
19020 xorq %r14, %r14
19021 xorq %r15, %r15
19022
19023+ pax_force_retaddr 0, 1
19024 ret
19025
19026 1:
19027diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19028index 5449a26..0b6c759 100644
19029--- a/arch/x86/kernel/setup.c
19030+++ b/arch/x86/kernel/setup.c
19031@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19032
19033 if (!boot_params.hdr.root_flags)
19034 root_mountflags &= ~MS_RDONLY;
19035- init_mm.start_code = (unsigned long) _text;
19036- init_mm.end_code = (unsigned long) _etext;
19037+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19038+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19039 init_mm.end_data = (unsigned long) _edata;
19040 init_mm.brk = _brk_end;
19041
19042- code_resource.start = virt_to_phys(_text);
19043- code_resource.end = virt_to_phys(_etext)-1;
19044- data_resource.start = virt_to_phys(_etext);
19045+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19046+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19047+ data_resource.start = virt_to_phys(_sdata);
19048 data_resource.end = virt_to_phys(_edata)-1;
19049 bss_resource.start = virt_to_phys(&__bss_start);
19050 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19051diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19052index d559af9..524c6ad 100644
19053--- a/arch/x86/kernel/setup_percpu.c
19054+++ b/arch/x86/kernel/setup_percpu.c
19055@@ -25,19 +25,17 @@
19056 # define DBG(x...)
19057 #endif
19058
19059-DEFINE_PER_CPU(int, cpu_number);
19060+#ifdef CONFIG_SMP
19061+DEFINE_PER_CPU(unsigned int, cpu_number);
19062 EXPORT_PER_CPU_SYMBOL(cpu_number);
19063+#endif
19064
19065-#ifdef CONFIG_X86_64
19066 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19067-#else
19068-#define BOOT_PERCPU_OFFSET 0
19069-#endif
19070
19071 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19072 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19073
19074-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19075+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19076 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19077 };
19078 EXPORT_SYMBOL(__per_cpu_offset);
19079@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19080 {
19081 #ifdef CONFIG_X86_32
19082 struct desc_struct gdt;
19083+ unsigned long base = per_cpu_offset(cpu);
19084
19085- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19086- 0x2 | DESCTYPE_S, 0x8);
19087- gdt.s = 1;
19088+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19089+ 0x83 | DESCTYPE_S, 0xC);
19090 write_gdt_entry(get_cpu_gdt_table(cpu),
19091 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19092 #endif
19093@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19094 /* alrighty, percpu areas up and running */
19095 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19096 for_each_possible_cpu(cpu) {
19097+#ifdef CONFIG_CC_STACKPROTECTOR
19098+#ifdef CONFIG_X86_32
19099+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19100+#endif
19101+#endif
19102 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19103 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19104 per_cpu(cpu_number, cpu) = cpu;
19105@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19106 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19107 #endif
19108 #endif
19109+#ifdef CONFIG_CC_STACKPROTECTOR
19110+#ifdef CONFIG_X86_32
19111+ if (!cpu)
19112+ per_cpu(stack_canary.canary, cpu) = canary;
19113+#endif
19114+#endif
19115 /*
19116 * Up to this point, the boot CPU has been using .data.init
19117 * area. Reload any changed state for the boot CPU.
19118diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19119index 6a44a76..a9287a1 100644
19120--- a/arch/x86/kernel/signal.c
19121+++ b/arch/x86/kernel/signal.c
19122@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19123 * Align the stack pointer according to the i386 ABI,
19124 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19125 */
19126- sp = ((sp + 4) & -16ul) - 4;
19127+ sp = ((sp - 12) & -16ul) - 4;
19128 #else /* !CONFIG_X86_32 */
19129 sp = round_down(sp, 16) - 8;
19130 #endif
19131@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19132 * Return an always-bogus address instead so we will die with SIGSEGV.
19133 */
19134 if (onsigstack && !likely(on_sig_stack(sp)))
19135- return (void __user *)-1L;
19136+ return (__force void __user *)-1L;
19137
19138 /* save i387 state */
19139 if (used_math() && save_i387_xstate(*fpstate) < 0)
19140- return (void __user *)-1L;
19141+ return (__force void __user *)-1L;
19142
19143 return (void __user *)sp;
19144 }
19145@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19146 }
19147
19148 if (current->mm->context.vdso)
19149- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19150+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19151 else
19152- restorer = &frame->retcode;
19153+ restorer = (void __user *)&frame->retcode;
19154 if (ka->sa.sa_flags & SA_RESTORER)
19155 restorer = ka->sa.sa_restorer;
19156
19157@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19158 * reasons and because gdb uses it as a signature to notice
19159 * signal handler stack frames.
19160 */
19161- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19162+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19163
19164 if (err)
19165 return -EFAULT;
19166@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19167 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19168
19169 /* Set up to return from userspace. */
19170- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19171+ if (current->mm->context.vdso)
19172+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19173+ else
19174+ restorer = (void __user *)&frame->retcode;
19175 if (ka->sa.sa_flags & SA_RESTORER)
19176 restorer = ka->sa.sa_restorer;
19177 put_user_ex(restorer, &frame->pretcode);
19178@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19179 * reasons and because gdb uses it as a signature to notice
19180 * signal handler stack frames.
19181 */
19182- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19183+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19184 } put_user_catch(err);
19185
19186 if (err)
19187@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19188 int signr;
19189 sigset_t *oldset;
19190
19191+ pax_track_stack();
19192+
19193 /*
19194 * We want the common case to go fast, which is why we may in certain
19195 * cases get here from kernel mode. Just return without doing anything
19196@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19197 * X86_32: vm86 regs switched out by assembly code before reaching
19198 * here, so testing against kernel CS suffices.
19199 */
19200- if (!user_mode(regs))
19201+ if (!user_mode_novm(regs))
19202 return;
19203
19204 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19205diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19206index 7e8e905..64d5c32 100644
19207--- a/arch/x86/kernel/smpboot.c
19208+++ b/arch/x86/kernel/smpboot.c
19209@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19210 */
19211 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19212
19213-void cpu_hotplug_driver_lock()
19214+void cpu_hotplug_driver_lock(void)
19215 {
19216- mutex_lock(&x86_cpu_hotplug_driver_mutex);
19217+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
19218 }
19219
19220-void cpu_hotplug_driver_unlock()
19221+void cpu_hotplug_driver_unlock(void)
19222 {
19223- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19224+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19225 }
19226
19227 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19228@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19229 * target processor state.
19230 */
19231 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19232- (unsigned long)stack_start.sp);
19233+ stack_start);
19234
19235 /*
19236 * Run STARTUP IPI loop.
19237@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19238 set_idle_for_cpu(cpu, c_idle.idle);
19239 do_rest:
19240 per_cpu(current_task, cpu) = c_idle.idle;
19241+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19242 #ifdef CONFIG_X86_32
19243 /* Stack for startup_32 can be just as for start_secondary onwards */
19244 irq_ctx_init(cpu);
19245@@ -750,13 +751,15 @@ do_rest:
19246 #else
19247 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19248 initial_gs = per_cpu_offset(cpu);
19249- per_cpu(kernel_stack, cpu) =
19250- (unsigned long)task_stack_page(c_idle.idle) -
19251- KERNEL_STACK_OFFSET + THREAD_SIZE;
19252+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19253 #endif
19254+
19255+ pax_open_kernel();
19256 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19257+ pax_close_kernel();
19258+
19259 initial_code = (unsigned long)start_secondary;
19260- stack_start.sp = (void *) c_idle.idle->thread.sp;
19261+ stack_start = c_idle.idle->thread.sp;
19262
19263 /* start_ip had better be page-aligned! */
19264 start_ip = setup_trampoline();
19265@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19266
19267 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19268
19269+#ifdef CONFIG_PAX_PER_CPU_PGD
19270+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19271+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19272+ KERNEL_PGD_PTRS);
19273+#endif
19274+
19275 err = do_boot_cpu(apicid, cpu);
19276
19277 if (err) {
19278diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19279index 3149032..14f1053 100644
19280--- a/arch/x86/kernel/step.c
19281+++ b/arch/x86/kernel/step.c
19282@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19283 struct desc_struct *desc;
19284 unsigned long base;
19285
19286- seg &= ~7UL;
19287+ seg >>= 3;
19288
19289 mutex_lock(&child->mm->context.lock);
19290- if (unlikely((seg >> 3) >= child->mm->context.size))
19291+ if (unlikely(seg >= child->mm->context.size))
19292 addr = -1L; /* bogus selector, access would fault */
19293 else {
19294 desc = child->mm->context.ldt + seg;
19295@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19296 addr += base;
19297 }
19298 mutex_unlock(&child->mm->context.lock);
19299- }
19300+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19301+ addr = ktla_ktva(addr);
19302
19303 return addr;
19304 }
19305@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19306 unsigned char opcode[15];
19307 unsigned long addr = convert_ip_to_linear(child, regs);
19308
19309+ if (addr == -EINVAL)
19310+ return 0;
19311+
19312 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19313 for (i = 0; i < copied; i++) {
19314 switch (opcode[i]) {
19315@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19316
19317 #ifdef CONFIG_X86_64
19318 case 0x40 ... 0x4f:
19319- if (regs->cs != __USER_CS)
19320+ if ((regs->cs & 0xffff) != __USER_CS)
19321 /* 32-bit mode: register increment */
19322 return 0;
19323 /* 64-bit mode: REX prefix */
19324diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19325index dee1ff7..a397f7f 100644
19326--- a/arch/x86/kernel/sys_i386_32.c
19327+++ b/arch/x86/kernel/sys_i386_32.c
19328@@ -24,6 +24,21 @@
19329
19330 #include <asm/syscalls.h>
19331
19332+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19333+{
19334+ unsigned long pax_task_size = TASK_SIZE;
19335+
19336+#ifdef CONFIG_PAX_SEGMEXEC
19337+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19338+ pax_task_size = SEGMEXEC_TASK_SIZE;
19339+#endif
19340+
19341+ if (len > pax_task_size || addr > pax_task_size - len)
19342+ return -EINVAL;
19343+
19344+ return 0;
19345+}
19346+
19347 /*
19348 * Perform the select(nd, in, out, ex, tv) and mmap() system
19349 * calls. Linux/i386 didn't use to be able to handle more than
19350@@ -58,6 +73,212 @@ out:
19351 return err;
19352 }
19353
19354+unsigned long
19355+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19356+ unsigned long len, unsigned long pgoff, unsigned long flags)
19357+{
19358+ struct mm_struct *mm = current->mm;
19359+ struct vm_area_struct *vma;
19360+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19361+
19362+#ifdef CONFIG_PAX_SEGMEXEC
19363+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19364+ pax_task_size = SEGMEXEC_TASK_SIZE;
19365+#endif
19366+
19367+ pax_task_size -= PAGE_SIZE;
19368+
19369+ if (len > pax_task_size)
19370+ return -ENOMEM;
19371+
19372+ if (flags & MAP_FIXED)
19373+ return addr;
19374+
19375+#ifdef CONFIG_PAX_RANDMMAP
19376+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19377+#endif
19378+
19379+ if (addr) {
19380+ addr = PAGE_ALIGN(addr);
19381+ if (pax_task_size - len >= addr) {
19382+ vma = find_vma(mm, addr);
19383+ if (check_heap_stack_gap(vma, addr, len))
19384+ return addr;
19385+ }
19386+ }
19387+ if (len > mm->cached_hole_size) {
19388+ start_addr = addr = mm->free_area_cache;
19389+ } else {
19390+ start_addr = addr = mm->mmap_base;
19391+ mm->cached_hole_size = 0;
19392+ }
19393+
19394+#ifdef CONFIG_PAX_PAGEEXEC
19395+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19396+ start_addr = 0x00110000UL;
19397+
19398+#ifdef CONFIG_PAX_RANDMMAP
19399+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19400+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19401+#endif
19402+
19403+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19404+ start_addr = addr = mm->mmap_base;
19405+ else
19406+ addr = start_addr;
19407+ }
19408+#endif
19409+
19410+full_search:
19411+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19412+ /* At this point: (!vma || addr < vma->vm_end). */
19413+ if (pax_task_size - len < addr) {
19414+ /*
19415+ * Start a new search - just in case we missed
19416+ * some holes.
19417+ */
19418+ if (start_addr != mm->mmap_base) {
19419+ start_addr = addr = mm->mmap_base;
19420+ mm->cached_hole_size = 0;
19421+ goto full_search;
19422+ }
19423+ return -ENOMEM;
19424+ }
19425+ if (check_heap_stack_gap(vma, addr, len))
19426+ break;
19427+ if (addr + mm->cached_hole_size < vma->vm_start)
19428+ mm->cached_hole_size = vma->vm_start - addr;
19429+ addr = vma->vm_end;
19430+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19431+ start_addr = addr = mm->mmap_base;
19432+ mm->cached_hole_size = 0;
19433+ goto full_search;
19434+ }
19435+ }
19436+
19437+ /*
19438+ * Remember the place where we stopped the search:
19439+ */
19440+ mm->free_area_cache = addr + len;
19441+ return addr;
19442+}
19443+
19444+unsigned long
19445+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19446+ const unsigned long len, const unsigned long pgoff,
19447+ const unsigned long flags)
19448+{
19449+ struct vm_area_struct *vma;
19450+ struct mm_struct *mm = current->mm;
19451+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19452+
19453+#ifdef CONFIG_PAX_SEGMEXEC
19454+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19455+ pax_task_size = SEGMEXEC_TASK_SIZE;
19456+#endif
19457+
19458+ pax_task_size -= PAGE_SIZE;
19459+
19460+ /* requested length too big for entire address space */
19461+ if (len > pax_task_size)
19462+ return -ENOMEM;
19463+
19464+ if (flags & MAP_FIXED)
19465+ return addr;
19466+
19467+#ifdef CONFIG_PAX_PAGEEXEC
19468+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19469+ goto bottomup;
19470+#endif
19471+
19472+#ifdef CONFIG_PAX_RANDMMAP
19473+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19474+#endif
19475+
19476+ /* requesting a specific address */
19477+ if (addr) {
19478+ addr = PAGE_ALIGN(addr);
19479+ if (pax_task_size - len >= addr) {
19480+ vma = find_vma(mm, addr);
19481+ if (check_heap_stack_gap(vma, addr, len))
19482+ return addr;
19483+ }
19484+ }
19485+
19486+ /* check if free_area_cache is useful for us */
19487+ if (len <= mm->cached_hole_size) {
19488+ mm->cached_hole_size = 0;
19489+ mm->free_area_cache = mm->mmap_base;
19490+ }
19491+
19492+ /* either no address requested or can't fit in requested address hole */
19493+ addr = mm->free_area_cache;
19494+
19495+ /* make sure it can fit in the remaining address space */
19496+ if (addr > len) {
19497+ vma = find_vma(mm, addr-len);
19498+ if (check_heap_stack_gap(vma, addr - len, len))
19499+ /* remember the address as a hint for next time */
19500+ return (mm->free_area_cache = addr-len);
19501+ }
19502+
19503+ if (mm->mmap_base < len)
19504+ goto bottomup;
19505+
19506+ addr = mm->mmap_base-len;
19507+
19508+ do {
19509+ /*
19510+ * Lookup failure means no vma is above this address,
19511+ * else if new region fits below vma->vm_start,
19512+ * return with success:
19513+ */
19514+ vma = find_vma(mm, addr);
19515+ if (check_heap_stack_gap(vma, addr, len))
19516+ /* remember the address as a hint for next time */
19517+ return (mm->free_area_cache = addr);
19518+
19519+ /* remember the largest hole we saw so far */
19520+ if (addr + mm->cached_hole_size < vma->vm_start)
19521+ mm->cached_hole_size = vma->vm_start - addr;
19522+
19523+ /* try just below the current vma->vm_start */
19524+ addr = skip_heap_stack_gap(vma, len);
19525+ } while (!IS_ERR_VALUE(addr));
19526+
19527+bottomup:
19528+ /*
19529+ * A failed mmap() very likely causes application failure,
19530+ * so fall back to the bottom-up function here. This scenario
19531+ * can happen with large stack limits and large mmap()
19532+ * allocations.
19533+ */
19534+
19535+#ifdef CONFIG_PAX_SEGMEXEC
19536+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19537+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19538+ else
19539+#endif
19540+
19541+ mm->mmap_base = TASK_UNMAPPED_BASE;
19542+
19543+#ifdef CONFIG_PAX_RANDMMAP
19544+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19545+ mm->mmap_base += mm->delta_mmap;
19546+#endif
19547+
19548+ mm->free_area_cache = mm->mmap_base;
19549+ mm->cached_hole_size = ~0UL;
19550+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19551+ /*
19552+ * Restore the topdown base:
19553+ */
19554+ mm->mmap_base = base;
19555+ mm->free_area_cache = base;
19556+ mm->cached_hole_size = ~0UL;
19557+
19558+ return addr;
19559+}
19560
19561 struct sel_arg_struct {
19562 unsigned long n;
19563@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19564 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19565 case SEMTIMEDOP:
19566 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19567- (const struct timespec __user *)fifth);
19568+ (__force const struct timespec __user *)fifth);
19569
19570 case SEMGET:
19571 return sys_semget(first, second, third);
19572@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19573 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19574 if (ret)
19575 return ret;
19576- return put_user(raddr, (ulong __user *) third);
19577+ return put_user(raddr, (__force ulong __user *) third);
19578 }
19579 case 1: /* iBCS2 emulator entry point */
19580 if (!segment_eq(get_fs(), get_ds()))
19581@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19582
19583 return error;
19584 }
19585-
19586-
19587-/*
19588- * Do a system call from kernel instead of calling sys_execve so we
19589- * end up with proper pt_regs.
19590- */
19591-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19592-{
19593- long __res;
19594- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19595- : "=a" (__res)
19596- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19597- return __res;
19598-}
19599diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19600index 8aa2057..b604bc1 100644
19601--- a/arch/x86/kernel/sys_x86_64.c
19602+++ b/arch/x86/kernel/sys_x86_64.c
19603@@ -32,8 +32,8 @@ out:
19604 return error;
19605 }
19606
19607-static void find_start_end(unsigned long flags, unsigned long *begin,
19608- unsigned long *end)
19609+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19610+ unsigned long *begin, unsigned long *end)
19611 {
19612 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19613 unsigned long new_begin;
19614@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19615 *begin = new_begin;
19616 }
19617 } else {
19618- *begin = TASK_UNMAPPED_BASE;
19619+ *begin = mm->mmap_base;
19620 *end = TASK_SIZE;
19621 }
19622 }
19623@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19624 if (flags & MAP_FIXED)
19625 return addr;
19626
19627- find_start_end(flags, &begin, &end);
19628+ find_start_end(mm, flags, &begin, &end);
19629
19630 if (len > end)
19631 return -ENOMEM;
19632
19633+#ifdef CONFIG_PAX_RANDMMAP
19634+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19635+#endif
19636+
19637 if (addr) {
19638 addr = PAGE_ALIGN(addr);
19639 vma = find_vma(mm, addr);
19640- if (end - len >= addr &&
19641- (!vma || addr + len <= vma->vm_start))
19642+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19643 return addr;
19644 }
19645 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19646@@ -106,7 +109,7 @@ full_search:
19647 }
19648 return -ENOMEM;
19649 }
19650- if (!vma || addr + len <= vma->vm_start) {
19651+ if (check_heap_stack_gap(vma, addr, len)) {
19652 /*
19653 * Remember the place where we stopped the search:
19654 */
19655@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19656 {
19657 struct vm_area_struct *vma;
19658 struct mm_struct *mm = current->mm;
19659- unsigned long addr = addr0;
19660+ unsigned long base = mm->mmap_base, addr = addr0;
19661
19662 /* requested length too big for entire address space */
19663 if (len > TASK_SIZE)
19664@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19665 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19666 goto bottomup;
19667
19668+#ifdef CONFIG_PAX_RANDMMAP
19669+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19670+#endif
19671+
19672 /* requesting a specific address */
19673 if (addr) {
19674 addr = PAGE_ALIGN(addr);
19675- vma = find_vma(mm, addr);
19676- if (TASK_SIZE - len >= addr &&
19677- (!vma || addr + len <= vma->vm_start))
19678- return addr;
19679+ if (TASK_SIZE - len >= addr) {
19680+ vma = find_vma(mm, addr);
19681+ if (check_heap_stack_gap(vma, addr, len))
19682+ return addr;
19683+ }
19684 }
19685
19686 /* check if free_area_cache is useful for us */
19687@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19688 /* make sure it can fit in the remaining address space */
19689 if (addr > len) {
19690 vma = find_vma(mm, addr-len);
19691- if (!vma || addr <= vma->vm_start)
19692+ if (check_heap_stack_gap(vma, addr - len, len))
19693 /* remember the address as a hint for next time */
19694 return mm->free_area_cache = addr-len;
19695 }
19696@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19697 * return with success:
19698 */
19699 vma = find_vma(mm, addr);
19700- if (!vma || addr+len <= vma->vm_start)
19701+ if (check_heap_stack_gap(vma, addr, len))
19702 /* remember the address as a hint for next time */
19703 return mm->free_area_cache = addr;
19704
19705@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19706 mm->cached_hole_size = vma->vm_start - addr;
19707
19708 /* try just below the current vma->vm_start */
19709- addr = vma->vm_start-len;
19710- } while (len < vma->vm_start);
19711+ addr = skip_heap_stack_gap(vma, len);
19712+ } while (!IS_ERR_VALUE(addr));
19713
19714 bottomup:
19715 /*
19716@@ -198,13 +206,21 @@ bottomup:
19717 * can happen with large stack limits and large mmap()
19718 * allocations.
19719 */
19720+ mm->mmap_base = TASK_UNMAPPED_BASE;
19721+
19722+#ifdef CONFIG_PAX_RANDMMAP
19723+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19724+ mm->mmap_base += mm->delta_mmap;
19725+#endif
19726+
19727+ mm->free_area_cache = mm->mmap_base;
19728 mm->cached_hole_size = ~0UL;
19729- mm->free_area_cache = TASK_UNMAPPED_BASE;
19730 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19731 /*
19732 * Restore the topdown base:
19733 */
19734- mm->free_area_cache = mm->mmap_base;
19735+ mm->mmap_base = base;
19736+ mm->free_area_cache = base;
19737 mm->cached_hole_size = ~0UL;
19738
19739 return addr;
19740diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19741index 76d70a4..4c94a44 100644
19742--- a/arch/x86/kernel/syscall_table_32.S
19743+++ b/arch/x86/kernel/syscall_table_32.S
19744@@ -1,3 +1,4 @@
19745+.section .rodata,"a",@progbits
19746 ENTRY(sys_call_table)
19747 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19748 .long sys_exit
19749diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19750index 46b8277..3349d55 100644
19751--- a/arch/x86/kernel/tboot.c
19752+++ b/arch/x86/kernel/tboot.c
19753@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19754
19755 void tboot_shutdown(u32 shutdown_type)
19756 {
19757- void (*shutdown)(void);
19758+ void (* __noreturn shutdown)(void);
19759
19760 if (!tboot_enabled())
19761 return;
19762@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19763
19764 switch_to_tboot_pt();
19765
19766- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19767+ shutdown = (void *)tboot->shutdown_entry;
19768 shutdown();
19769
19770 /* should not reach here */
19771@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19772 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19773 }
19774
19775-static atomic_t ap_wfs_count;
19776+static atomic_unchecked_t ap_wfs_count;
19777
19778 static int tboot_wait_for_aps(int num_aps)
19779 {
19780@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19781 {
19782 switch (action) {
19783 case CPU_DYING:
19784- atomic_inc(&ap_wfs_count);
19785+ atomic_inc_unchecked(&ap_wfs_count);
19786 if (num_online_cpus() == 1)
19787- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19788+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19789 return NOTIFY_BAD;
19790 break;
19791 }
19792@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19793
19794 tboot_create_trampoline();
19795
19796- atomic_set(&ap_wfs_count, 0);
19797+ atomic_set_unchecked(&ap_wfs_count, 0);
19798 register_hotcpu_notifier(&tboot_cpu_notifier);
19799 return 0;
19800 }
19801diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19802index be25734..87fe232 100644
19803--- a/arch/x86/kernel/time.c
19804+++ b/arch/x86/kernel/time.c
19805@@ -26,17 +26,13 @@
19806 int timer_ack;
19807 #endif
19808
19809-#ifdef CONFIG_X86_64
19810-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19811-#endif
19812-
19813 unsigned long profile_pc(struct pt_regs *regs)
19814 {
19815 unsigned long pc = instruction_pointer(regs);
19816
19817- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19818+ if (!user_mode(regs) && in_lock_functions(pc)) {
19819 #ifdef CONFIG_FRAME_POINTER
19820- return *(unsigned long *)(regs->bp + sizeof(long));
19821+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19822 #else
19823 unsigned long *sp =
19824 (unsigned long *)kernel_stack_pointer(regs);
19825@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19826 * or above a saved flags. Eflags has bits 22-31 zero,
19827 * kernel addresses don't.
19828 */
19829+
19830+#ifdef CONFIG_PAX_KERNEXEC
19831+ return ktla_ktva(sp[0]);
19832+#else
19833 if (sp[0] >> 22)
19834 return sp[0];
19835 if (sp[1] >> 22)
19836 return sp[1];
19837 #endif
19838+
19839+#endif
19840 }
19841 return pc;
19842 }
19843diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19844index 6bb7b85..dd853e1 100644
19845--- a/arch/x86/kernel/tls.c
19846+++ b/arch/x86/kernel/tls.c
19847@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19848 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19849 return -EINVAL;
19850
19851+#ifdef CONFIG_PAX_SEGMEXEC
19852+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19853+ return -EINVAL;
19854+#endif
19855+
19856 set_tls_desc(p, idx, &info, 1);
19857
19858 return 0;
19859diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19860index 8508237..229b664 100644
19861--- a/arch/x86/kernel/trampoline_32.S
19862+++ b/arch/x86/kernel/trampoline_32.S
19863@@ -32,6 +32,12 @@
19864 #include <asm/segment.h>
19865 #include <asm/page_types.h>
19866
19867+#ifdef CONFIG_PAX_KERNEXEC
19868+#define ta(X) (X)
19869+#else
19870+#define ta(X) ((X) - __PAGE_OFFSET)
19871+#endif
19872+
19873 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19874 __CPUINITRODATA
19875 .code16
19876@@ -60,7 +66,7 @@ r_base = .
19877 inc %ax # protected mode (PE) bit
19878 lmsw %ax # into protected mode
19879 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19880- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19881+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19882
19883 # These need to be in the same 64K segment as the above;
19884 # hence we don't use the boot_gdt_descr defined in head.S
19885diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19886index 3af2dff..ba8aa49 100644
19887--- a/arch/x86/kernel/trampoline_64.S
19888+++ b/arch/x86/kernel/trampoline_64.S
19889@@ -91,7 +91,7 @@ startup_32:
19890 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19891 movl %eax, %ds
19892
19893- movl $X86_CR4_PAE, %eax
19894+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19895 movl %eax, %cr4 # Enable PAE mode
19896
19897 # Setup trampoline 4 level pagetables
19898@@ -127,7 +127,7 @@ startup_64:
19899 no_longmode:
19900 hlt
19901 jmp no_longmode
19902-#include "verify_cpu_64.S"
19903+#include "verify_cpu.S"
19904
19905 # Careful these need to be in the same 64K segment as the above;
19906 tidt:
19907@@ -138,7 +138,7 @@ tidt:
19908 # so the kernel can live anywhere
19909 .balign 4
19910 tgdt:
19911- .short tgdt_end - tgdt # gdt limit
19912+ .short tgdt_end - tgdt - 1 # gdt limit
19913 .long tgdt - r_base
19914 .short 0
19915 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19916diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19917index 7e37dce..ec3f8e5 100644
19918--- a/arch/x86/kernel/traps.c
19919+++ b/arch/x86/kernel/traps.c
19920@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
19921
19922 /* Do we ignore FPU interrupts ? */
19923 char ignore_fpu_irq;
19924-
19925-/*
19926- * The IDT has to be page-aligned to simplify the Pentium
19927- * F0 0F bug workaround.
19928- */
19929-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19930 #endif
19931
19932 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19933@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19934 static inline void
19935 die_if_kernel(const char *str, struct pt_regs *regs, long err)
19936 {
19937- if (!user_mode_vm(regs))
19938+ if (!user_mode(regs))
19939 die(str, regs, err);
19940 }
19941 #endif
19942
19943 static void __kprobes
19944-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19945+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19946 long error_code, siginfo_t *info)
19947 {
19948 struct task_struct *tsk = current;
19949
19950 #ifdef CONFIG_X86_32
19951- if (regs->flags & X86_VM_MASK) {
19952+ if (v8086_mode(regs)) {
19953 /*
19954 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19955 * On nmi (interrupt 2), do_trap should not be called.
19956@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19957 }
19958 #endif
19959
19960- if (!user_mode(regs))
19961+ if (!user_mode_novm(regs))
19962 goto kernel_trap;
19963
19964 #ifdef CONFIG_X86_32
19965@@ -158,7 +152,7 @@ trap_signal:
19966 printk_ratelimit()) {
19967 printk(KERN_INFO
19968 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19969- tsk->comm, tsk->pid, str,
19970+ tsk->comm, task_pid_nr(tsk), str,
19971 regs->ip, regs->sp, error_code);
19972 print_vma_addr(" in ", regs->ip);
19973 printk("\n");
19974@@ -175,8 +169,20 @@ kernel_trap:
19975 if (!fixup_exception(regs)) {
19976 tsk->thread.error_code = error_code;
19977 tsk->thread.trap_no = trapnr;
19978+
19979+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19980+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19981+ str = "PAX: suspicious stack segment fault";
19982+#endif
19983+
19984 die(str, regs, error_code);
19985 }
19986+
19987+#ifdef CONFIG_PAX_REFCOUNT
19988+ if (trapnr == 4)
19989+ pax_report_refcount_overflow(regs);
19990+#endif
19991+
19992 return;
19993
19994 #ifdef CONFIG_X86_32
19995@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19996 conditional_sti(regs);
19997
19998 #ifdef CONFIG_X86_32
19999- if (regs->flags & X86_VM_MASK)
20000+ if (v8086_mode(regs))
20001 goto gp_in_vm86;
20002 #endif
20003
20004 tsk = current;
20005- if (!user_mode(regs))
20006+ if (!user_mode_novm(regs))
20007 goto gp_in_kernel;
20008
20009+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20010+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20011+ struct mm_struct *mm = tsk->mm;
20012+ unsigned long limit;
20013+
20014+ down_write(&mm->mmap_sem);
20015+ limit = mm->context.user_cs_limit;
20016+ if (limit < TASK_SIZE) {
20017+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20018+ up_write(&mm->mmap_sem);
20019+ return;
20020+ }
20021+ up_write(&mm->mmap_sem);
20022+ }
20023+#endif
20024+
20025 tsk->thread.error_code = error_code;
20026 tsk->thread.trap_no = 13;
20027
20028@@ -305,6 +327,13 @@ gp_in_kernel:
20029 if (notify_die(DIE_GPF, "general protection fault", regs,
20030 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20031 return;
20032+
20033+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20034+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20035+ die("PAX: suspicious general protection fault", regs, error_code);
20036+ else
20037+#endif
20038+
20039 die("general protection fault", regs, error_code);
20040 }
20041
20042@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20043 dotraplinkage notrace __kprobes void
20044 do_nmi(struct pt_regs *regs, long error_code)
20045 {
20046+
20047+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20048+ if (!user_mode(regs)) {
20049+ unsigned long cs = regs->cs & 0xFFFF;
20050+ unsigned long ip = ktva_ktla(regs->ip);
20051+
20052+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20053+ regs->ip = ip;
20054+ }
20055+#endif
20056+
20057 nmi_enter();
20058
20059 inc_irq_stat(__nmi_count);
20060@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20061 }
20062
20063 #ifdef CONFIG_X86_32
20064- if (regs->flags & X86_VM_MASK)
20065+ if (v8086_mode(regs))
20066 goto debug_vm86;
20067 #endif
20068
20069@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20070 * kernel space (but re-enable TF when returning to user mode).
20071 */
20072 if (condition & DR_STEP) {
20073- if (!user_mode(regs))
20074+ if (!user_mode_novm(regs))
20075 goto clear_TF_reenable;
20076 }
20077
20078@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20079 * Handle strange cache flush from user space exception
20080 * in all other cases. This is undocumented behaviour.
20081 */
20082- if (regs->flags & X86_VM_MASK) {
20083+ if (v8086_mode(regs)) {
20084 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20085 return;
20086 }
20087@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20088 void __math_state_restore(void)
20089 {
20090 struct thread_info *thread = current_thread_info();
20091- struct task_struct *tsk = thread->task;
20092+ struct task_struct *tsk = current;
20093
20094 /*
20095 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20096@@ -825,8 +865,7 @@ void __math_state_restore(void)
20097 */
20098 asmlinkage void math_state_restore(void)
20099 {
20100- struct thread_info *thread = current_thread_info();
20101- struct task_struct *tsk = thread->task;
20102+ struct task_struct *tsk = current;
20103
20104 if (!tsk_used_math(tsk)) {
20105 local_irq_enable();
20106diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20107new file mode 100644
20108index 0000000..50c5edd
20109--- /dev/null
20110+++ b/arch/x86/kernel/verify_cpu.S
20111@@ -0,0 +1,140 @@
20112+/*
20113+ *
20114+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
20115+ * code has been borrowed from boot/setup.S and was introduced by
20116+ * Andi Kleen.
20117+ *
20118+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20119+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20120+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20121+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20122+ *
20123+ * This source code is licensed under the GNU General Public License,
20124+ * Version 2. See the file COPYING for more details.
20125+ *
20126+ * This is a common code for verification whether CPU supports
20127+ * long mode and SSE or not. It is not called directly instead this
20128+ * file is included at various places and compiled in that context.
20129+ * This file is expected to run in 32bit code. Currently:
20130+ *
20131+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20132+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
20133+ * arch/x86/kernel/head_32.S: processor startup
20134+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20135+ *
20136+ * verify_cpu, returns the status of longmode and SSE in register %eax.
20137+ * 0: Success 1: Failure
20138+ *
20139+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20140+ *
20141+ * The caller needs to check for the error code and take the action
20142+ * appropriately. Either display a message or halt.
20143+ */
20144+
20145+#include <asm/cpufeature.h>
20146+#include <asm/msr-index.h>
20147+
20148+verify_cpu:
20149+ pushfl # Save caller passed flags
20150+ pushl $0 # Kill any dangerous flags
20151+ popfl
20152+
20153+ pushfl # standard way to check for cpuid
20154+ popl %eax
20155+ movl %eax,%ebx
20156+ xorl $0x200000,%eax
20157+ pushl %eax
20158+ popfl
20159+ pushfl
20160+ popl %eax
20161+ cmpl %eax,%ebx
20162+ jz verify_cpu_no_longmode # cpu has no cpuid
20163+
20164+ movl $0x0,%eax # See if cpuid 1 is implemented
20165+ cpuid
20166+ cmpl $0x1,%eax
20167+ jb verify_cpu_no_longmode # no cpuid 1
20168+
20169+ xor %di,%di
20170+ cmpl $0x68747541,%ebx # AuthenticAMD
20171+ jnz verify_cpu_noamd
20172+ cmpl $0x69746e65,%edx
20173+ jnz verify_cpu_noamd
20174+ cmpl $0x444d4163,%ecx
20175+ jnz verify_cpu_noamd
20176+ mov $1,%di # cpu is from AMD
20177+ jmp verify_cpu_check
20178+
20179+verify_cpu_noamd:
20180+ cmpl $0x756e6547,%ebx # GenuineIntel?
20181+ jnz verify_cpu_check
20182+ cmpl $0x49656e69,%edx
20183+ jnz verify_cpu_check
20184+ cmpl $0x6c65746e,%ecx
20185+ jnz verify_cpu_check
20186+
20187+ # only call IA32_MISC_ENABLE when:
20188+ # family > 6 || (family == 6 && model >= 0xd)
20189+ movl $0x1, %eax # check CPU family and model
20190+ cpuid
20191+ movl %eax, %ecx
20192+
20193+ andl $0x0ff00f00, %eax # mask family and extended family
20194+ shrl $8, %eax
20195+ cmpl $6, %eax
20196+ ja verify_cpu_clear_xd # family > 6, ok
20197+ jb verify_cpu_check # family < 6, skip
20198+
20199+ andl $0x000f00f0, %ecx # mask model and extended model
20200+ shrl $4, %ecx
20201+ cmpl $0xd, %ecx
20202+ jb verify_cpu_check # family == 6, model < 0xd, skip
20203+
20204+verify_cpu_clear_xd:
20205+ movl $MSR_IA32_MISC_ENABLE, %ecx
20206+ rdmsr
20207+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20208+ jnc verify_cpu_check # only write MSR if bit was changed
20209+ wrmsr
20210+
20211+verify_cpu_check:
20212+ movl $0x1,%eax # Does the cpu have what it takes
20213+ cpuid
20214+ andl $REQUIRED_MASK0,%edx
20215+ xorl $REQUIRED_MASK0,%edx
20216+ jnz verify_cpu_no_longmode
20217+
20218+ movl $0x80000000,%eax # See if extended cpuid is implemented
20219+ cpuid
20220+ cmpl $0x80000001,%eax
20221+ jb verify_cpu_no_longmode # no extended cpuid
20222+
20223+ movl $0x80000001,%eax # Does the cpu have what it takes
20224+ cpuid
20225+ andl $REQUIRED_MASK1,%edx
20226+ xorl $REQUIRED_MASK1,%edx
20227+ jnz verify_cpu_no_longmode
20228+
20229+verify_cpu_sse_test:
20230+ movl $1,%eax
20231+ cpuid
20232+ andl $SSE_MASK,%edx
20233+ cmpl $SSE_MASK,%edx
20234+ je verify_cpu_sse_ok
20235+ test %di,%di
20236+ jz verify_cpu_no_longmode # only try to force SSE on AMD
20237+ movl $MSR_K7_HWCR,%ecx
20238+ rdmsr
20239+ btr $15,%eax # enable SSE
20240+ wrmsr
20241+ xor %di,%di # don't loop
20242+ jmp verify_cpu_sse_test # try again
20243+
20244+verify_cpu_no_longmode:
20245+ popfl # Restore caller passed flags
20246+ movl $1,%eax
20247+ ret
20248+verify_cpu_sse_ok:
20249+ popfl # Restore caller passed flags
20250+ xorl %eax, %eax
20251+ ret
20252diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20253deleted file mode 100644
20254index 45b6f8a..0000000
20255--- a/arch/x86/kernel/verify_cpu_64.S
20256+++ /dev/null
20257@@ -1,105 +0,0 @@
20258-/*
20259- *
20260- * verify_cpu.S - Code for cpu long mode and SSE verification. This
20261- * code has been borrowed from boot/setup.S and was introduced by
20262- * Andi Kleen.
20263- *
20264- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20265- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20266- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20267- *
20268- * This source code is licensed under the GNU General Public License,
20269- * Version 2. See the file COPYING for more details.
20270- *
20271- * This is a common code for verification whether CPU supports
20272- * long mode and SSE or not. It is not called directly instead this
20273- * file is included at various places and compiled in that context.
20274- * Following are the current usage.
20275- *
20276- * This file is included by both 16bit and 32bit code.
20277- *
20278- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20279- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20280- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20281- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20282- *
20283- * verify_cpu, returns the status of cpu check in register %eax.
20284- * 0: Success 1: Failure
20285- *
20286- * The caller needs to check for the error code and take the action
20287- * appropriately. Either display a message or halt.
20288- */
20289-
20290-#include <asm/cpufeature.h>
20291-
20292-verify_cpu:
20293- pushfl # Save caller passed flags
20294- pushl $0 # Kill any dangerous flags
20295- popfl
20296-
20297- pushfl # standard way to check for cpuid
20298- popl %eax
20299- movl %eax,%ebx
20300- xorl $0x200000,%eax
20301- pushl %eax
20302- popfl
20303- pushfl
20304- popl %eax
20305- cmpl %eax,%ebx
20306- jz verify_cpu_no_longmode # cpu has no cpuid
20307-
20308- movl $0x0,%eax # See if cpuid 1 is implemented
20309- cpuid
20310- cmpl $0x1,%eax
20311- jb verify_cpu_no_longmode # no cpuid 1
20312-
20313- xor %di,%di
20314- cmpl $0x68747541,%ebx # AuthenticAMD
20315- jnz verify_cpu_noamd
20316- cmpl $0x69746e65,%edx
20317- jnz verify_cpu_noamd
20318- cmpl $0x444d4163,%ecx
20319- jnz verify_cpu_noamd
20320- mov $1,%di # cpu is from AMD
20321-
20322-verify_cpu_noamd:
20323- movl $0x1,%eax # Does the cpu have what it takes
20324- cpuid
20325- andl $REQUIRED_MASK0,%edx
20326- xorl $REQUIRED_MASK0,%edx
20327- jnz verify_cpu_no_longmode
20328-
20329- movl $0x80000000,%eax # See if extended cpuid is implemented
20330- cpuid
20331- cmpl $0x80000001,%eax
20332- jb verify_cpu_no_longmode # no extended cpuid
20333-
20334- movl $0x80000001,%eax # Does the cpu have what it takes
20335- cpuid
20336- andl $REQUIRED_MASK1,%edx
20337- xorl $REQUIRED_MASK1,%edx
20338- jnz verify_cpu_no_longmode
20339-
20340-verify_cpu_sse_test:
20341- movl $1,%eax
20342- cpuid
20343- andl $SSE_MASK,%edx
20344- cmpl $SSE_MASK,%edx
20345- je verify_cpu_sse_ok
20346- test %di,%di
20347- jz verify_cpu_no_longmode # only try to force SSE on AMD
20348- movl $0xc0010015,%ecx # HWCR
20349- rdmsr
20350- btr $15,%eax # enable SSE
20351- wrmsr
20352- xor %di,%di # don't loop
20353- jmp verify_cpu_sse_test # try again
20354-
20355-verify_cpu_no_longmode:
20356- popfl # Restore caller passed flags
20357- movl $1,%eax
20358- ret
20359-verify_cpu_sse_ok:
20360- popfl # Restore caller passed flags
20361- xorl %eax, %eax
20362- ret
20363diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20364index 9c4e625..c992817 100644
20365--- a/arch/x86/kernel/vm86_32.c
20366+++ b/arch/x86/kernel/vm86_32.c
20367@@ -41,6 +41,7 @@
20368 #include <linux/ptrace.h>
20369 #include <linux/audit.h>
20370 #include <linux/stddef.h>
20371+#include <linux/grsecurity.h>
20372
20373 #include <asm/uaccess.h>
20374 #include <asm/io.h>
20375@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20376 do_exit(SIGSEGV);
20377 }
20378
20379- tss = &per_cpu(init_tss, get_cpu());
20380+ tss = init_tss + get_cpu();
20381 current->thread.sp0 = current->thread.saved_sp0;
20382 current->thread.sysenter_cs = __KERNEL_CS;
20383 load_sp0(tss, &current->thread);
20384@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20385 struct task_struct *tsk;
20386 int tmp, ret = -EPERM;
20387
20388+#ifdef CONFIG_GRKERNSEC_VM86
20389+ if (!capable(CAP_SYS_RAWIO)) {
20390+ gr_handle_vm86();
20391+ goto out;
20392+ }
20393+#endif
20394+
20395 tsk = current;
20396 if (tsk->thread.saved_sp0)
20397 goto out;
20398@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20399 int tmp, ret;
20400 struct vm86plus_struct __user *v86;
20401
20402+#ifdef CONFIG_GRKERNSEC_VM86
20403+ if (!capable(CAP_SYS_RAWIO)) {
20404+ gr_handle_vm86();
20405+ ret = -EPERM;
20406+ goto out;
20407+ }
20408+#endif
20409+
20410 tsk = current;
20411 switch (regs->bx) {
20412 case VM86_REQUEST_IRQ:
20413@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20414 tsk->thread.saved_fs = info->regs32->fs;
20415 tsk->thread.saved_gs = get_user_gs(info->regs32);
20416
20417- tss = &per_cpu(init_tss, get_cpu());
20418+ tss = init_tss + get_cpu();
20419 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20420 if (cpu_has_sep)
20421 tsk->thread.sysenter_cs = 0;
20422@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20423 goto cannot_handle;
20424 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20425 goto cannot_handle;
20426- intr_ptr = (unsigned long __user *) (i << 2);
20427+ intr_ptr = (__force unsigned long __user *) (i << 2);
20428 if (get_user(segoffs, intr_ptr))
20429 goto cannot_handle;
20430 if ((segoffs >> 16) == BIOSSEG)
20431diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20432index d430e4c..831f817 100644
20433--- a/arch/x86/kernel/vmi_32.c
20434+++ b/arch/x86/kernel/vmi_32.c
20435@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20436 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20437
20438 #define call_vrom_func(rom,func) \
20439- (((VROMFUNC *)(rom->func))())
20440+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
20441
20442 #define call_vrom_long_func(rom,func,arg) \
20443- (((VROMLONGFUNC *)(rom->func)) (arg))
20444-
20445-static struct vrom_header *vmi_rom;
20446+({\
20447+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20448+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20449+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20450+ __reloc;\
20451+})
20452+
20453+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20454 static int disable_pge;
20455 static int disable_pse;
20456 static int disable_sep;
20457@@ -76,10 +81,10 @@ static struct {
20458 void (*set_initial_ap_state)(int, int);
20459 void (*halt)(void);
20460 void (*set_lazy_mode)(int mode);
20461-} vmi_ops;
20462+} __no_const vmi_ops __read_only;
20463
20464 /* Cached VMI operations */
20465-struct vmi_timer_ops vmi_timer_ops;
20466+struct vmi_timer_ops vmi_timer_ops __read_only;
20467
20468 /*
20469 * VMI patching routines.
20470@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20471 static inline void patch_offset(void *insnbuf,
20472 unsigned long ip, unsigned long dest)
20473 {
20474- *(unsigned long *)(insnbuf+1) = dest-ip-5;
20475+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
20476 }
20477
20478 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20479@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20480 {
20481 u64 reloc;
20482 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20483+
20484 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20485 switch(rel->type) {
20486 case VMI_RELOCATION_CALL_REL:
20487@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20488
20489 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20490 {
20491- const pte_t pte = { .pte = 0 };
20492+ const pte_t pte = __pte(0ULL);
20493 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20494 }
20495
20496 static void vmi_pmd_clear(pmd_t *pmd)
20497 {
20498- const pte_t pte = { .pte = 0 };
20499+ const pte_t pte = __pte(0ULL);
20500 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20501 }
20502 #endif
20503@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20504 ap.ss = __KERNEL_DS;
20505 ap.esp = (unsigned long) start_esp;
20506
20507- ap.ds = __USER_DS;
20508- ap.es = __USER_DS;
20509+ ap.ds = __KERNEL_DS;
20510+ ap.es = __KERNEL_DS;
20511 ap.fs = __KERNEL_PERCPU;
20512- ap.gs = __KERNEL_STACK_CANARY;
20513+ savesegment(gs, ap.gs);
20514
20515 ap.eflags = 0;
20516
20517@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20518 paravirt_leave_lazy_mmu();
20519 }
20520
20521+#ifdef CONFIG_PAX_KERNEXEC
20522+static unsigned long vmi_pax_open_kernel(void)
20523+{
20524+ return 0;
20525+}
20526+
20527+static unsigned long vmi_pax_close_kernel(void)
20528+{
20529+ return 0;
20530+}
20531+#endif
20532+
20533 static inline int __init check_vmi_rom(struct vrom_header *rom)
20534 {
20535 struct pci_header *pci;
20536@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20537 return 0;
20538 if (rom->vrom_signature != VMI_SIGNATURE)
20539 return 0;
20540+ if (rom->rom_length * 512 > sizeof(*rom)) {
20541+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20542+ return 0;
20543+ }
20544 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20545 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20546 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20547@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20548 struct vrom_header *romstart;
20549 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20550 if (check_vmi_rom(romstart)) {
20551- vmi_rom = romstart;
20552+ vmi_rom = *romstart;
20553 return 1;
20554 }
20555 }
20556@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20557
20558 para_fill(pv_irq_ops.safe_halt, Halt);
20559
20560+#ifdef CONFIG_PAX_KERNEXEC
20561+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20562+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20563+#endif
20564+
20565 /*
20566 * Alternative instruction rewriting doesn't happen soon enough
20567 * to convert VMI_IRET to a call instead of a jump; so we have
20568@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20569
20570 void __init vmi_init(void)
20571 {
20572- if (!vmi_rom)
20573+ if (!vmi_rom.rom_signature)
20574 probe_vmi_rom();
20575 else
20576- check_vmi_rom(vmi_rom);
20577+ check_vmi_rom(&vmi_rom);
20578
20579 /* In case probing for or validating the ROM failed, basil */
20580- if (!vmi_rom)
20581+ if (!vmi_rom.rom_signature)
20582 return;
20583
20584- reserve_top_address(-vmi_rom->virtual_top);
20585+ reserve_top_address(-vmi_rom.virtual_top);
20586
20587 #ifdef CONFIG_X86_IO_APIC
20588 /* This is virtual hardware; timer routing is wired correctly */
20589@@ -874,7 +901,7 @@ void __init vmi_activate(void)
20590 {
20591 unsigned long flags;
20592
20593- if (!vmi_rom)
20594+ if (!vmi_rom.rom_signature)
20595 return;
20596
20597 local_irq_save(flags);
20598diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20599index 3c68fe2..12c8280 100644
20600--- a/arch/x86/kernel/vmlinux.lds.S
20601+++ b/arch/x86/kernel/vmlinux.lds.S
20602@@ -26,6 +26,13 @@
20603 #include <asm/page_types.h>
20604 #include <asm/cache.h>
20605 #include <asm/boot.h>
20606+#include <asm/segment.h>
20607+
20608+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20609+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20610+#else
20611+#define __KERNEL_TEXT_OFFSET 0
20612+#endif
20613
20614 #undef i386 /* in case the preprocessor is a 32bit one */
20615
20616@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20617 #ifdef CONFIG_X86_32
20618 OUTPUT_ARCH(i386)
20619 ENTRY(phys_startup_32)
20620-jiffies = jiffies_64;
20621 #else
20622 OUTPUT_ARCH(i386:x86-64)
20623 ENTRY(phys_startup_64)
20624-jiffies_64 = jiffies;
20625 #endif
20626
20627 PHDRS {
20628 text PT_LOAD FLAGS(5); /* R_E */
20629- data PT_LOAD FLAGS(7); /* RWE */
20630+#ifdef CONFIG_X86_32
20631+ module PT_LOAD FLAGS(5); /* R_E */
20632+#endif
20633+#ifdef CONFIG_XEN
20634+ rodata PT_LOAD FLAGS(5); /* R_E */
20635+#else
20636+ rodata PT_LOAD FLAGS(4); /* R__ */
20637+#endif
20638+ data PT_LOAD FLAGS(6); /* RW_ */
20639 #ifdef CONFIG_X86_64
20640 user PT_LOAD FLAGS(5); /* R_E */
20641+#endif
20642+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20643 #ifdef CONFIG_SMP
20644 percpu PT_LOAD FLAGS(6); /* RW_ */
20645 #endif
20646+ text.init PT_LOAD FLAGS(5); /* R_E */
20647+ text.exit PT_LOAD FLAGS(5); /* R_E */
20648 init PT_LOAD FLAGS(7); /* RWE */
20649-#endif
20650 note PT_NOTE FLAGS(0); /* ___ */
20651 }
20652
20653 SECTIONS
20654 {
20655 #ifdef CONFIG_X86_32
20656- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20657- phys_startup_32 = startup_32 - LOAD_OFFSET;
20658+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20659 #else
20660- . = __START_KERNEL;
20661- phys_startup_64 = startup_64 - LOAD_OFFSET;
20662+ . = __START_KERNEL;
20663 #endif
20664
20665 /* Text and read-only data */
20666- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20667- _text = .;
20668+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20669 /* bootstrapping code */
20670+#ifdef CONFIG_X86_32
20671+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20672+#else
20673+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20674+#endif
20675+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20676+ _text = .;
20677 HEAD_TEXT
20678 #ifdef CONFIG_X86_32
20679 . = ALIGN(PAGE_SIZE);
20680@@ -82,28 +102,71 @@ SECTIONS
20681 IRQENTRY_TEXT
20682 *(.fixup)
20683 *(.gnu.warning)
20684- /* End of text section */
20685- _etext = .;
20686 } :text = 0x9090
20687
20688- NOTES :text :note
20689+ . += __KERNEL_TEXT_OFFSET;
20690+
20691+#ifdef CONFIG_X86_32
20692+ . = ALIGN(PAGE_SIZE);
20693+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20694+ *(.vmi.rom)
20695+ } :module
20696+
20697+ . = ALIGN(PAGE_SIZE);
20698+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20699+
20700+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20701+ MODULES_EXEC_VADDR = .;
20702+ BYTE(0)
20703+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20704+ . = ALIGN(HPAGE_SIZE);
20705+ MODULES_EXEC_END = . - 1;
20706+#endif
20707
20708- EXCEPTION_TABLE(16) :text = 0x9090
20709+ } :module
20710+#endif
20711+
20712+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20713+ /* End of text section */
20714+ _etext = . - __KERNEL_TEXT_OFFSET;
20715+ }
20716+
20717+#ifdef CONFIG_X86_32
20718+ . = ALIGN(PAGE_SIZE);
20719+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20720+ *(.idt)
20721+ . = ALIGN(PAGE_SIZE);
20722+ *(.empty_zero_page)
20723+ *(.swapper_pg_fixmap)
20724+ *(.swapper_pg_pmd)
20725+ *(.swapper_pg_dir)
20726+ *(.trampoline_pg_dir)
20727+ } :rodata
20728+#endif
20729+
20730+ . = ALIGN(PAGE_SIZE);
20731+ NOTES :rodata :note
20732+
20733+ EXCEPTION_TABLE(16) :rodata
20734
20735 RO_DATA(PAGE_SIZE)
20736
20737 /* Data */
20738 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20739+
20740+#ifdef CONFIG_PAX_KERNEXEC
20741+ . = ALIGN(HPAGE_SIZE);
20742+#else
20743+ . = ALIGN(PAGE_SIZE);
20744+#endif
20745+
20746 /* Start of data section */
20747 _sdata = .;
20748
20749 /* init_task */
20750 INIT_TASK_DATA(THREAD_SIZE)
20751
20752-#ifdef CONFIG_X86_32
20753- /* 32 bit has nosave before _edata */
20754 NOSAVE_DATA
20755-#endif
20756
20757 PAGE_ALIGNED_DATA(PAGE_SIZE)
20758
20759@@ -112,6 +175,8 @@ SECTIONS
20760 DATA_DATA
20761 CONSTRUCTORS
20762
20763+ jiffies = jiffies_64;
20764+
20765 /* rarely changed data like cpu maps */
20766 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20767
20768@@ -166,12 +231,6 @@ SECTIONS
20769 }
20770 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20771
20772- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20773- .jiffies : AT(VLOAD(.jiffies)) {
20774- *(.jiffies)
20775- }
20776- jiffies = VVIRT(.jiffies);
20777-
20778 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20779 *(.vsyscall_3)
20780 }
20781@@ -187,12 +246,19 @@ SECTIONS
20782 #endif /* CONFIG_X86_64 */
20783
20784 /* Init code and data - will be freed after init */
20785- . = ALIGN(PAGE_SIZE);
20786 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20787+ BYTE(0)
20788+
20789+#ifdef CONFIG_PAX_KERNEXEC
20790+ . = ALIGN(HPAGE_SIZE);
20791+#else
20792+ . = ALIGN(PAGE_SIZE);
20793+#endif
20794+
20795 __init_begin = .; /* paired with __init_end */
20796- }
20797+ } :init.begin
20798
20799-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20800+#ifdef CONFIG_SMP
20801 /*
20802 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20803 * output PHDR, so the next output section - .init.text - should
20804@@ -201,12 +267,27 @@ SECTIONS
20805 PERCPU_VADDR(0, :percpu)
20806 #endif
20807
20808- INIT_TEXT_SECTION(PAGE_SIZE)
20809-#ifdef CONFIG_X86_64
20810- :init
20811-#endif
20812+ . = ALIGN(PAGE_SIZE);
20813+ init_begin = .;
20814+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20815+ VMLINUX_SYMBOL(_sinittext) = .;
20816+ INIT_TEXT
20817+ VMLINUX_SYMBOL(_einittext) = .;
20818+ . = ALIGN(PAGE_SIZE);
20819+ } :text.init
20820
20821- INIT_DATA_SECTION(16)
20822+ /*
20823+ * .exit.text is discard at runtime, not link time, to deal with
20824+ * references from .altinstructions and .eh_frame
20825+ */
20826+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20827+ EXIT_TEXT
20828+ . = ALIGN(16);
20829+ } :text.exit
20830+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20831+
20832+ . = ALIGN(PAGE_SIZE);
20833+ INIT_DATA_SECTION(16) :init
20834
20835 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20836 __x86_cpu_dev_start = .;
20837@@ -232,19 +313,11 @@ SECTIONS
20838 *(.altinstr_replacement)
20839 }
20840
20841- /*
20842- * .exit.text is discard at runtime, not link time, to deal with
20843- * references from .altinstructions and .eh_frame
20844- */
20845- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20846- EXIT_TEXT
20847- }
20848-
20849 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20850 EXIT_DATA
20851 }
20852
20853-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20854+#ifndef CONFIG_SMP
20855 PERCPU(PAGE_SIZE)
20856 #endif
20857
20858@@ -267,12 +340,6 @@ SECTIONS
20859 . = ALIGN(PAGE_SIZE);
20860 }
20861
20862-#ifdef CONFIG_X86_64
20863- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20864- NOSAVE_DATA
20865- }
20866-#endif
20867-
20868 /* BSS */
20869 . = ALIGN(PAGE_SIZE);
20870 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20871@@ -288,6 +355,7 @@ SECTIONS
20872 __brk_base = .;
20873 . += 64 * 1024; /* 64k alignment slop space */
20874 *(.brk_reservation) /* areas brk users have reserved */
20875+ . = ALIGN(HPAGE_SIZE);
20876 __brk_limit = .;
20877 }
20878
20879@@ -316,13 +384,12 @@ SECTIONS
20880 * for the boot processor.
20881 */
20882 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20883-INIT_PER_CPU(gdt_page);
20884 INIT_PER_CPU(irq_stack_union);
20885
20886 /*
20887 * Build-time check on the image size:
20888 */
20889-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20890+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20891 "kernel image bigger than KERNEL_IMAGE_SIZE");
20892
20893 #ifdef CONFIG_SMP
20894diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20895index 62f39d7..3bc46a1 100644
20896--- a/arch/x86/kernel/vsyscall_64.c
20897+++ b/arch/x86/kernel/vsyscall_64.c
20898@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20899
20900 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20901 /* copy vsyscall data */
20902+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20903 vsyscall_gtod_data.clock.vread = clock->vread;
20904 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
20905 vsyscall_gtod_data.clock.mask = clock->mask;
20906@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
20907 We do this here because otherwise user space would do it on
20908 its own in a likely inferior way (no access to jiffies).
20909 If you don't like it pass NULL. */
20910- if (tcache && tcache->blob[0] == (j = __jiffies)) {
20911+ if (tcache && tcache->blob[0] == (j = jiffies)) {
20912 p = tcache->blob[1];
20913 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
20914 /* Load per CPU data from RDTSCP */
20915diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20916index 3909e3b..5433a97 100644
20917--- a/arch/x86/kernel/x8664_ksyms_64.c
20918+++ b/arch/x86/kernel/x8664_ksyms_64.c
20919@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
20920
20921 EXPORT_SYMBOL(copy_user_generic);
20922 EXPORT_SYMBOL(__copy_user_nocache);
20923-EXPORT_SYMBOL(copy_from_user);
20924-EXPORT_SYMBOL(copy_to_user);
20925 EXPORT_SYMBOL(__copy_from_user_inatomic);
20926
20927 EXPORT_SYMBOL(copy_page);
20928diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20929index c5ee17e..d63218f 100644
20930--- a/arch/x86/kernel/xsave.c
20931+++ b/arch/x86/kernel/xsave.c
20932@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20933 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20934 return -1;
20935
20936- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20937+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20938 fx_sw_user->extended_size -
20939 FP_XSTATE_MAGIC2_SIZE));
20940 /*
20941@@ -196,7 +196,7 @@ fx_only:
20942 * the other extended state.
20943 */
20944 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20945- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20946+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20947 }
20948
20949 /*
20950@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
20951 if (task_thread_info(tsk)->status & TS_XSAVE)
20952 err = restore_user_xstate(buf);
20953 else
20954- err = fxrstor_checking((__force struct i387_fxsave_struct *)
20955+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
20956 buf);
20957 if (unlikely(err)) {
20958 /*
20959diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20960index 1350e43..a94b011 100644
20961--- a/arch/x86/kvm/emulate.c
20962+++ b/arch/x86/kvm/emulate.c
20963@@ -81,8 +81,8 @@
20964 #define Src2CL (1<<29)
20965 #define Src2ImmByte (2<<29)
20966 #define Src2One (3<<29)
20967-#define Src2Imm16 (4<<29)
20968-#define Src2Mask (7<<29)
20969+#define Src2Imm16 (4U<<29)
20970+#define Src2Mask (7U<<29)
20971
20972 enum {
20973 Group1_80, Group1_81, Group1_82, Group1_83,
20974@@ -411,6 +411,7 @@ static u32 group2_table[] = {
20975
20976 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
20977 do { \
20978+ unsigned long _tmp; \
20979 __asm__ __volatile__ ( \
20980 _PRE_EFLAGS("0", "4", "2") \
20981 _op _suffix " %"_x"3,%1; " \
20982@@ -424,8 +425,6 @@ static u32 group2_table[] = {
20983 /* Raw emulation: instruction has two explicit operands. */
20984 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
20985 do { \
20986- unsigned long _tmp; \
20987- \
20988 switch ((_dst).bytes) { \
20989 case 2: \
20990 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
20991@@ -441,7 +440,6 @@ static u32 group2_table[] = {
20992
20993 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20994 do { \
20995- unsigned long _tmp; \
20996 switch ((_dst).bytes) { \
20997 case 1: \
20998 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
20999diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21000index 8dfeaaa..4daa395 100644
21001--- a/arch/x86/kvm/lapic.c
21002+++ b/arch/x86/kvm/lapic.c
21003@@ -52,7 +52,7 @@
21004 #define APIC_BUS_CYCLE_NS 1
21005
21006 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21007-#define apic_debug(fmt, arg...)
21008+#define apic_debug(fmt, arg...) do {} while (0)
21009
21010 #define APIC_LVT_NUM 6
21011 /* 14 is the version for Xeon and Pentium 8.4.8*/
21012diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21013index 3bc2707..dd157e2 100644
21014--- a/arch/x86/kvm/paging_tmpl.h
21015+++ b/arch/x86/kvm/paging_tmpl.h
21016@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21017 int level = PT_PAGE_TABLE_LEVEL;
21018 unsigned long mmu_seq;
21019
21020+ pax_track_stack();
21021+
21022 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21023 kvm_mmu_audit(vcpu, "pre page fault");
21024
21025@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21026 kvm_mmu_free_some_pages(vcpu);
21027 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21028 level, &write_pt, pfn);
21029+ (void)sptep;
21030 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21031 sptep, *sptep, write_pt);
21032
21033diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21034index 7c6e63e..c5d92c1 100644
21035--- a/arch/x86/kvm/svm.c
21036+++ b/arch/x86/kvm/svm.c
21037@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21038 int cpu = raw_smp_processor_id();
21039
21040 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21041+
21042+ pax_open_kernel();
21043 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21044+ pax_close_kernel();
21045+
21046 load_TR_desc();
21047 }
21048
21049@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21050 return true;
21051 }
21052
21053-static struct kvm_x86_ops svm_x86_ops = {
21054+static const struct kvm_x86_ops svm_x86_ops = {
21055 .cpu_has_kvm_support = has_svm,
21056 .disabled_by_bios = is_disabled,
21057 .hardware_setup = svm_hardware_setup,
21058diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21059index e6d925f..e7a4af8 100644
21060--- a/arch/x86/kvm/vmx.c
21061+++ b/arch/x86/kvm/vmx.c
21062@@ -570,7 +570,11 @@ static void reload_tss(void)
21063
21064 kvm_get_gdt(&gdt);
21065 descs = (void *)gdt.base;
21066+
21067+ pax_open_kernel();
21068 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21069+ pax_close_kernel();
21070+
21071 load_TR_desc();
21072 }
21073
21074@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21075 if (!cpu_has_vmx_flexpriority())
21076 flexpriority_enabled = 0;
21077
21078- if (!cpu_has_vmx_tpr_shadow())
21079- kvm_x86_ops->update_cr8_intercept = NULL;
21080+ if (!cpu_has_vmx_tpr_shadow()) {
21081+ pax_open_kernel();
21082+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21083+ pax_close_kernel();
21084+ }
21085
21086 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21087 kvm_disable_largepages();
21088@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21089 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21090
21091 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21092- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21093+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21094 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21095 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21096 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21097@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21098 "jmp .Lkvm_vmx_return \n\t"
21099 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21100 ".Lkvm_vmx_return: "
21101+
21102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21103+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21104+ ".Lkvm_vmx_return2: "
21105+#endif
21106+
21107 /* Save guest registers, load host registers, keep flags */
21108 "xchg %0, (%%"R"sp) \n\t"
21109 "mov %%"R"ax, %c[rax](%0) \n\t"
21110@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21111 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21112 #endif
21113 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21114+
21115+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21116+ ,[cs]"i"(__KERNEL_CS)
21117+#endif
21118+
21119 : "cc", "memory"
21120- , R"bx", R"di", R"si"
21121+ , R"ax", R"bx", R"di", R"si"
21122 #ifdef CONFIG_X86_64
21123 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21124 #endif
21125@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21126 if (vmx->rmode.irq.pending)
21127 fixup_rmode_irq(vmx);
21128
21129- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21130+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21131+
21132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21133+ loadsegment(fs, __KERNEL_PERCPU);
21134+#endif
21135+
21136+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21137+ __set_fs(current_thread_info()->addr_limit);
21138+#endif
21139+
21140 vmx->launched = 1;
21141
21142 vmx_complete_interrupts(vmx);
21143@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21144 return false;
21145 }
21146
21147-static struct kvm_x86_ops vmx_x86_ops = {
21148+static const struct kvm_x86_ops vmx_x86_ops = {
21149 .cpu_has_kvm_support = cpu_has_kvm_support,
21150 .disabled_by_bios = vmx_disabled_by_bios,
21151 .hardware_setup = hardware_setup,
21152diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21153index df1cefb..5e882ad 100644
21154--- a/arch/x86/kvm/x86.c
21155+++ b/arch/x86/kvm/x86.c
21156@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21157 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21158 struct kvm_cpuid_entry2 __user *entries);
21159
21160-struct kvm_x86_ops *kvm_x86_ops;
21161+const struct kvm_x86_ops *kvm_x86_ops;
21162 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21163
21164 int ignore_msrs = 0;
21165@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21166 struct kvm_cpuid2 *cpuid,
21167 struct kvm_cpuid_entry2 __user *entries)
21168 {
21169- int r;
21170+ int r, i;
21171
21172 r = -E2BIG;
21173 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21174 goto out;
21175 r = -EFAULT;
21176- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21177- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21178+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21179 goto out;
21180+ for (i = 0; i < cpuid->nent; ++i) {
21181+ struct kvm_cpuid_entry2 cpuid_entry;
21182+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21183+ goto out;
21184+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21185+ }
21186 vcpu->arch.cpuid_nent = cpuid->nent;
21187 kvm_apic_set_version(vcpu);
21188 return 0;
21189@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21190 struct kvm_cpuid2 *cpuid,
21191 struct kvm_cpuid_entry2 __user *entries)
21192 {
21193- int r;
21194+ int r, i;
21195
21196 vcpu_load(vcpu);
21197 r = -E2BIG;
21198 if (cpuid->nent < vcpu->arch.cpuid_nent)
21199 goto out;
21200 r = -EFAULT;
21201- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21202- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21203+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21204 goto out;
21205+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21206+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21207+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21208+ goto out;
21209+ }
21210 return 0;
21211
21212 out:
21213@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21214 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21215 struct kvm_interrupt *irq)
21216 {
21217- if (irq->irq < 0 || irq->irq >= 256)
21218+ if (irq->irq >= 256)
21219 return -EINVAL;
21220 if (irqchip_in_kernel(vcpu->kvm))
21221 return -ENXIO;
21222@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21223 .notifier_call = kvmclock_cpufreq_notifier
21224 };
21225
21226-int kvm_arch_init(void *opaque)
21227+int kvm_arch_init(const void *opaque)
21228 {
21229 int r, cpu;
21230- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21231+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21232
21233 if (kvm_x86_ops) {
21234 printk(KERN_ERR "kvm: already loaded the other module\n");
21235diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21236index 7e59dc1..b88c98f 100644
21237--- a/arch/x86/lguest/boot.c
21238+++ b/arch/x86/lguest/boot.c
21239@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21240 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21241 * Launcher to reboot us.
21242 */
21243-static void lguest_restart(char *reason)
21244+static __noreturn void lguest_restart(char *reason)
21245 {
21246 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21247+ BUG();
21248 }
21249
21250 /*G:050
21251diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21252index 824fa0b..c619e96 100644
21253--- a/arch/x86/lib/atomic64_32.c
21254+++ b/arch/x86/lib/atomic64_32.c
21255@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21256 }
21257 EXPORT_SYMBOL(atomic64_cmpxchg);
21258
21259+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21260+{
21261+ return cmpxchg8b(&ptr->counter, old_val, new_val);
21262+}
21263+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21264+
21265 /**
21266 * atomic64_xchg - xchg atomic64 variable
21267 * @ptr: pointer to type atomic64_t
21268@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21269 EXPORT_SYMBOL(atomic64_xchg);
21270
21271 /**
21272+ * atomic64_xchg_unchecked - xchg atomic64 variable
21273+ * @ptr: pointer to type atomic64_unchecked_t
21274+ * @new_val: value to assign
21275+ *
21276+ * Atomically xchgs the value of @ptr to @new_val and returns
21277+ * the old value.
21278+ */
21279+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21280+{
21281+ /*
21282+ * Try first with a (possibly incorrect) assumption about
21283+ * what we have there. We'll do two loops most likely,
21284+ * but we'll get an ownership MESI transaction straight away
21285+ * instead of a read transaction followed by a
21286+ * flush-for-ownership transaction:
21287+ */
21288+ u64 old_val, real_val = 0;
21289+
21290+ do {
21291+ old_val = real_val;
21292+
21293+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21294+
21295+ } while (real_val != old_val);
21296+
21297+ return old_val;
21298+}
21299+EXPORT_SYMBOL(atomic64_xchg_unchecked);
21300+
21301+/**
21302 * atomic64_set - set atomic64 variable
21303 * @ptr: pointer to type atomic64_t
21304 * @new_val: value to assign
21305@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21306 EXPORT_SYMBOL(atomic64_set);
21307
21308 /**
21309-EXPORT_SYMBOL(atomic64_read);
21310+ * atomic64_unchecked_set - set atomic64 variable
21311+ * @ptr: pointer to type atomic64_unchecked_t
21312+ * @new_val: value to assign
21313+ *
21314+ * Atomically sets the value of @ptr to @new_val.
21315+ */
21316+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21317+{
21318+ atomic64_xchg_unchecked(ptr, new_val);
21319+}
21320+EXPORT_SYMBOL(atomic64_set_unchecked);
21321+
21322+/**
21323 * atomic64_add_return - add and return
21324 * @delta: integer value to add
21325 * @ptr: pointer to type atomic64_t
21326@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21327 }
21328 EXPORT_SYMBOL(atomic64_add_return);
21329
21330+/**
21331+ * atomic64_add_return_unchecked - add and return
21332+ * @delta: integer value to add
21333+ * @ptr: pointer to type atomic64_unchecked_t
21334+ *
21335+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
21336+ */
21337+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21338+{
21339+ /*
21340+ * Try first with a (possibly incorrect) assumption about
21341+ * what we have there. We'll do two loops most likely,
21342+ * but we'll get an ownership MESI transaction straight away
21343+ * instead of a read transaction followed by a
21344+ * flush-for-ownership transaction:
21345+ */
21346+ u64 old_val, new_val, real_val = 0;
21347+
21348+ do {
21349+ old_val = real_val;
21350+ new_val = old_val + delta;
21351+
21352+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21353+
21354+ } while (real_val != old_val);
21355+
21356+ return new_val;
21357+}
21358+EXPORT_SYMBOL(atomic64_add_return_unchecked);
21359+
21360 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21361 {
21362 return atomic64_add_return(-delta, ptr);
21363 }
21364 EXPORT_SYMBOL(atomic64_sub_return);
21365
21366+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21367+{
21368+ return atomic64_add_return_unchecked(-delta, ptr);
21369+}
21370+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21371+
21372 u64 atomic64_inc_return(atomic64_t *ptr)
21373 {
21374 return atomic64_add_return(1, ptr);
21375 }
21376 EXPORT_SYMBOL(atomic64_inc_return);
21377
21378+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21379+{
21380+ return atomic64_add_return_unchecked(1, ptr);
21381+}
21382+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21383+
21384 u64 atomic64_dec_return(atomic64_t *ptr)
21385 {
21386 return atomic64_sub_return(1, ptr);
21387 }
21388 EXPORT_SYMBOL(atomic64_dec_return);
21389
21390+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21391+{
21392+ return atomic64_sub_return_unchecked(1, ptr);
21393+}
21394+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21395+
21396 /**
21397 * atomic64_add - add integer to atomic64 variable
21398 * @delta: integer value to add
21399@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21400 EXPORT_SYMBOL(atomic64_add);
21401
21402 /**
21403+ * atomic64_add_unchecked - add integer to atomic64 variable
21404+ * @delta: integer value to add
21405+ * @ptr: pointer to type atomic64_unchecked_t
21406+ *
21407+ * Atomically adds @delta to @ptr.
21408+ */
21409+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21410+{
21411+ atomic64_add_return_unchecked(delta, ptr);
21412+}
21413+EXPORT_SYMBOL(atomic64_add_unchecked);
21414+
21415+/**
21416 * atomic64_sub - subtract the atomic64 variable
21417 * @delta: integer value to subtract
21418 * @ptr: pointer to type atomic64_t
21419@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21420 EXPORT_SYMBOL(atomic64_sub);
21421
21422 /**
21423+ * atomic64_sub_unchecked - subtract the atomic64 variable
21424+ * @delta: integer value to subtract
21425+ * @ptr: pointer to type atomic64_unchecked_t
21426+ *
21427+ * Atomically subtracts @delta from @ptr.
21428+ */
21429+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21430+{
21431+ atomic64_add_unchecked(-delta, ptr);
21432+}
21433+EXPORT_SYMBOL(atomic64_sub_unchecked);
21434+
21435+/**
21436 * atomic64_sub_and_test - subtract value from variable and test result
21437 * @delta: integer value to subtract
21438 * @ptr: pointer to type atomic64_t
21439@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21440 EXPORT_SYMBOL(atomic64_inc);
21441
21442 /**
21443+ * atomic64_inc_unchecked - increment atomic64 variable
21444+ * @ptr: pointer to type atomic64_unchecked_t
21445+ *
21446+ * Atomically increments @ptr by 1.
21447+ */
21448+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21449+{
21450+ atomic64_add_unchecked(1, ptr);
21451+}
21452+EXPORT_SYMBOL(atomic64_inc_unchecked);
21453+
21454+/**
21455 * atomic64_dec - decrement atomic64 variable
21456 * @ptr: pointer to type atomic64_t
21457 *
21458@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21459 EXPORT_SYMBOL(atomic64_dec);
21460
21461 /**
21462+ * atomic64_dec_unchecked - decrement atomic64 variable
21463+ * @ptr: pointer to type atomic64_unchecked_t
21464+ *
21465+ * Atomically decrements @ptr by 1.
21466+ */
21467+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21468+{
21469+ atomic64_sub_unchecked(1, ptr);
21470+}
21471+EXPORT_SYMBOL(atomic64_dec_unchecked);
21472+
21473+/**
21474 * atomic64_dec_and_test - decrement and test
21475 * @ptr: pointer to type atomic64_t
21476 *
21477diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21478index adbccd0..98f96c8 100644
21479--- a/arch/x86/lib/checksum_32.S
21480+++ b/arch/x86/lib/checksum_32.S
21481@@ -28,7 +28,8 @@
21482 #include <linux/linkage.h>
21483 #include <asm/dwarf2.h>
21484 #include <asm/errno.h>
21485-
21486+#include <asm/segment.h>
21487+
21488 /*
21489 * computes a partial checksum, e.g. for TCP/UDP fragments
21490 */
21491@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21492
21493 #define ARGBASE 16
21494 #define FP 12
21495-
21496-ENTRY(csum_partial_copy_generic)
21497+
21498+ENTRY(csum_partial_copy_generic_to_user)
21499 CFI_STARTPROC
21500+
21501+#ifdef CONFIG_PAX_MEMORY_UDEREF
21502+ pushl %gs
21503+ CFI_ADJUST_CFA_OFFSET 4
21504+ popl %es
21505+ CFI_ADJUST_CFA_OFFSET -4
21506+ jmp csum_partial_copy_generic
21507+#endif
21508+
21509+ENTRY(csum_partial_copy_generic_from_user)
21510+
21511+#ifdef CONFIG_PAX_MEMORY_UDEREF
21512+ pushl %gs
21513+ CFI_ADJUST_CFA_OFFSET 4
21514+ popl %ds
21515+ CFI_ADJUST_CFA_OFFSET -4
21516+#endif
21517+
21518+ENTRY(csum_partial_copy_generic)
21519 subl $4,%esp
21520 CFI_ADJUST_CFA_OFFSET 4
21521 pushl %edi
21522@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21523 jmp 4f
21524 SRC(1: movw (%esi), %bx )
21525 addl $2, %esi
21526-DST( movw %bx, (%edi) )
21527+DST( movw %bx, %es:(%edi) )
21528 addl $2, %edi
21529 addw %bx, %ax
21530 adcl $0, %eax
21531@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21532 SRC(1: movl (%esi), %ebx )
21533 SRC( movl 4(%esi), %edx )
21534 adcl %ebx, %eax
21535-DST( movl %ebx, (%edi) )
21536+DST( movl %ebx, %es:(%edi) )
21537 adcl %edx, %eax
21538-DST( movl %edx, 4(%edi) )
21539+DST( movl %edx, %es:4(%edi) )
21540
21541 SRC( movl 8(%esi), %ebx )
21542 SRC( movl 12(%esi), %edx )
21543 adcl %ebx, %eax
21544-DST( movl %ebx, 8(%edi) )
21545+DST( movl %ebx, %es:8(%edi) )
21546 adcl %edx, %eax
21547-DST( movl %edx, 12(%edi) )
21548+DST( movl %edx, %es:12(%edi) )
21549
21550 SRC( movl 16(%esi), %ebx )
21551 SRC( movl 20(%esi), %edx )
21552 adcl %ebx, %eax
21553-DST( movl %ebx, 16(%edi) )
21554+DST( movl %ebx, %es:16(%edi) )
21555 adcl %edx, %eax
21556-DST( movl %edx, 20(%edi) )
21557+DST( movl %edx, %es:20(%edi) )
21558
21559 SRC( movl 24(%esi), %ebx )
21560 SRC( movl 28(%esi), %edx )
21561 adcl %ebx, %eax
21562-DST( movl %ebx, 24(%edi) )
21563+DST( movl %ebx, %es:24(%edi) )
21564 adcl %edx, %eax
21565-DST( movl %edx, 28(%edi) )
21566+DST( movl %edx, %es:28(%edi) )
21567
21568 lea 32(%esi), %esi
21569 lea 32(%edi), %edi
21570@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21571 shrl $2, %edx # This clears CF
21572 SRC(3: movl (%esi), %ebx )
21573 adcl %ebx, %eax
21574-DST( movl %ebx, (%edi) )
21575+DST( movl %ebx, %es:(%edi) )
21576 lea 4(%esi), %esi
21577 lea 4(%edi), %edi
21578 dec %edx
21579@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21580 jb 5f
21581 SRC( movw (%esi), %cx )
21582 leal 2(%esi), %esi
21583-DST( movw %cx, (%edi) )
21584+DST( movw %cx, %es:(%edi) )
21585 leal 2(%edi), %edi
21586 je 6f
21587 shll $16,%ecx
21588 SRC(5: movb (%esi), %cl )
21589-DST( movb %cl, (%edi) )
21590+DST( movb %cl, %es:(%edi) )
21591 6: addl %ecx, %eax
21592 adcl $0, %eax
21593 7:
21594@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21595
21596 6001:
21597 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21598- movl $-EFAULT, (%ebx)
21599+ movl $-EFAULT, %ss:(%ebx)
21600
21601 # zero the complete destination - computing the rest
21602 # is too much work
21603@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21604
21605 6002:
21606 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21607- movl $-EFAULT,(%ebx)
21608+ movl $-EFAULT,%ss:(%ebx)
21609 jmp 5000b
21610
21611 .previous
21612
21613+ pushl %ss
21614+ CFI_ADJUST_CFA_OFFSET 4
21615+ popl %ds
21616+ CFI_ADJUST_CFA_OFFSET -4
21617+ pushl %ss
21618+ CFI_ADJUST_CFA_OFFSET 4
21619+ popl %es
21620+ CFI_ADJUST_CFA_OFFSET -4
21621 popl %ebx
21622 CFI_ADJUST_CFA_OFFSET -4
21623 CFI_RESTORE ebx
21624@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21625 CFI_ADJUST_CFA_OFFSET -4
21626 ret
21627 CFI_ENDPROC
21628-ENDPROC(csum_partial_copy_generic)
21629+ENDPROC(csum_partial_copy_generic_to_user)
21630
21631 #else
21632
21633 /* Version for PentiumII/PPro */
21634
21635 #define ROUND1(x) \
21636+ nop; nop; nop; \
21637 SRC(movl x(%esi), %ebx ) ; \
21638 addl %ebx, %eax ; \
21639- DST(movl %ebx, x(%edi) ) ;
21640+ DST(movl %ebx, %es:x(%edi)) ;
21641
21642 #define ROUND(x) \
21643+ nop; nop; nop; \
21644 SRC(movl x(%esi), %ebx ) ; \
21645 adcl %ebx, %eax ; \
21646- DST(movl %ebx, x(%edi) ) ;
21647+ DST(movl %ebx, %es:x(%edi)) ;
21648
21649 #define ARGBASE 12
21650-
21651-ENTRY(csum_partial_copy_generic)
21652+
21653+ENTRY(csum_partial_copy_generic_to_user)
21654 CFI_STARTPROC
21655+
21656+#ifdef CONFIG_PAX_MEMORY_UDEREF
21657+ pushl %gs
21658+ CFI_ADJUST_CFA_OFFSET 4
21659+ popl %es
21660+ CFI_ADJUST_CFA_OFFSET -4
21661+ jmp csum_partial_copy_generic
21662+#endif
21663+
21664+ENTRY(csum_partial_copy_generic_from_user)
21665+
21666+#ifdef CONFIG_PAX_MEMORY_UDEREF
21667+ pushl %gs
21668+ CFI_ADJUST_CFA_OFFSET 4
21669+ popl %ds
21670+ CFI_ADJUST_CFA_OFFSET -4
21671+#endif
21672+
21673+ENTRY(csum_partial_copy_generic)
21674 pushl %ebx
21675 CFI_ADJUST_CFA_OFFSET 4
21676 CFI_REL_OFFSET ebx, 0
21677@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21678 subl %ebx, %edi
21679 lea -1(%esi),%edx
21680 andl $-32,%edx
21681- lea 3f(%ebx,%ebx), %ebx
21682+ lea 3f(%ebx,%ebx,2), %ebx
21683 testl %esi, %esi
21684 jmp *%ebx
21685 1: addl $64,%esi
21686@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21687 jb 5f
21688 SRC( movw (%esi), %dx )
21689 leal 2(%esi), %esi
21690-DST( movw %dx, (%edi) )
21691+DST( movw %dx, %es:(%edi) )
21692 leal 2(%edi), %edi
21693 je 6f
21694 shll $16,%edx
21695 5:
21696 SRC( movb (%esi), %dl )
21697-DST( movb %dl, (%edi) )
21698+DST( movb %dl, %es:(%edi) )
21699 6: addl %edx, %eax
21700 adcl $0, %eax
21701 7:
21702 .section .fixup, "ax"
21703 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21704- movl $-EFAULT, (%ebx)
21705+ movl $-EFAULT, %ss:(%ebx)
21706 # zero the complete destination (computing the rest is too much work)
21707 movl ARGBASE+8(%esp),%edi # dst
21708 movl ARGBASE+12(%esp),%ecx # len
21709@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21710 rep; stosb
21711 jmp 7b
21712 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21713- movl $-EFAULT, (%ebx)
21714+ movl $-EFAULT, %ss:(%ebx)
21715 jmp 7b
21716 .previous
21717
21718+#ifdef CONFIG_PAX_MEMORY_UDEREF
21719+ pushl %ss
21720+ CFI_ADJUST_CFA_OFFSET 4
21721+ popl %ds
21722+ CFI_ADJUST_CFA_OFFSET -4
21723+ pushl %ss
21724+ CFI_ADJUST_CFA_OFFSET 4
21725+ popl %es
21726+ CFI_ADJUST_CFA_OFFSET -4
21727+#endif
21728+
21729 popl %esi
21730 CFI_ADJUST_CFA_OFFSET -4
21731 CFI_RESTORE esi
21732@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21733 CFI_RESTORE ebx
21734 ret
21735 CFI_ENDPROC
21736-ENDPROC(csum_partial_copy_generic)
21737+ENDPROC(csum_partial_copy_generic_to_user)
21738
21739 #undef ROUND
21740 #undef ROUND1
21741diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21742index ebeafcc..1e3a402 100644
21743--- a/arch/x86/lib/clear_page_64.S
21744+++ b/arch/x86/lib/clear_page_64.S
21745@@ -1,5 +1,6 @@
21746 #include <linux/linkage.h>
21747 #include <asm/dwarf2.h>
21748+#include <asm/alternative-asm.h>
21749
21750 /*
21751 * Zero a page.
21752@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21753 movl $4096/8,%ecx
21754 xorl %eax,%eax
21755 rep stosq
21756+ pax_force_retaddr
21757 ret
21758 CFI_ENDPROC
21759 ENDPROC(clear_page_c)
21760@@ -33,6 +35,7 @@ ENTRY(clear_page)
21761 leaq 64(%rdi),%rdi
21762 jnz .Lloop
21763 nop
21764+ pax_force_retaddr
21765 ret
21766 CFI_ENDPROC
21767 .Lclear_page_end:
21768@@ -43,7 +46,7 @@ ENDPROC(clear_page)
21769
21770 #include <asm/cpufeature.h>
21771
21772- .section .altinstr_replacement,"ax"
21773+ .section .altinstr_replacement,"a"
21774 1: .byte 0xeb /* jmp <disp8> */
21775 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21776 2:
21777diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21778index 727a5d4..333818a 100644
21779--- a/arch/x86/lib/copy_page_64.S
21780+++ b/arch/x86/lib/copy_page_64.S
21781@@ -2,12 +2,14 @@
21782
21783 #include <linux/linkage.h>
21784 #include <asm/dwarf2.h>
21785+#include <asm/alternative-asm.h>
21786
21787 ALIGN
21788 copy_page_c:
21789 CFI_STARTPROC
21790 movl $4096/8,%ecx
21791 rep movsq
21792+ pax_force_retaddr
21793 ret
21794 CFI_ENDPROC
21795 ENDPROC(copy_page_c)
21796@@ -38,7 +40,7 @@ ENTRY(copy_page)
21797 movq 16 (%rsi), %rdx
21798 movq 24 (%rsi), %r8
21799 movq 32 (%rsi), %r9
21800- movq 40 (%rsi), %r10
21801+ movq 40 (%rsi), %r13
21802 movq 48 (%rsi), %r11
21803 movq 56 (%rsi), %r12
21804
21805@@ -49,7 +51,7 @@ ENTRY(copy_page)
21806 movq %rdx, 16 (%rdi)
21807 movq %r8, 24 (%rdi)
21808 movq %r9, 32 (%rdi)
21809- movq %r10, 40 (%rdi)
21810+ movq %r13, 40 (%rdi)
21811 movq %r11, 48 (%rdi)
21812 movq %r12, 56 (%rdi)
21813
21814@@ -68,7 +70,7 @@ ENTRY(copy_page)
21815 movq 16 (%rsi), %rdx
21816 movq 24 (%rsi), %r8
21817 movq 32 (%rsi), %r9
21818- movq 40 (%rsi), %r10
21819+ movq 40 (%rsi), %r13
21820 movq 48 (%rsi), %r11
21821 movq 56 (%rsi), %r12
21822
21823@@ -77,7 +79,7 @@ ENTRY(copy_page)
21824 movq %rdx, 16 (%rdi)
21825 movq %r8, 24 (%rdi)
21826 movq %r9, 32 (%rdi)
21827- movq %r10, 40 (%rdi)
21828+ movq %r13, 40 (%rdi)
21829 movq %r11, 48 (%rdi)
21830 movq %r12, 56 (%rdi)
21831
21832@@ -94,6 +96,7 @@ ENTRY(copy_page)
21833 CFI_RESTORE r13
21834 addq $3*8,%rsp
21835 CFI_ADJUST_CFA_OFFSET -3*8
21836+ pax_force_retaddr
21837 ret
21838 .Lcopy_page_end:
21839 CFI_ENDPROC
21840@@ -104,7 +107,7 @@ ENDPROC(copy_page)
21841
21842 #include <asm/cpufeature.h>
21843
21844- .section .altinstr_replacement,"ax"
21845+ .section .altinstr_replacement,"a"
21846 1: .byte 0xeb /* jmp <disp8> */
21847 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21848 2:
21849diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21850index af8debd..40c75f3 100644
21851--- a/arch/x86/lib/copy_user_64.S
21852+++ b/arch/x86/lib/copy_user_64.S
21853@@ -15,13 +15,15 @@
21854 #include <asm/asm-offsets.h>
21855 #include <asm/thread_info.h>
21856 #include <asm/cpufeature.h>
21857+#include <asm/pgtable.h>
21858+#include <asm/alternative-asm.h>
21859
21860 .macro ALTERNATIVE_JUMP feature,orig,alt
21861 0:
21862 .byte 0xe9 /* 32bit jump */
21863 .long \orig-1f /* by default jump to orig */
21864 1:
21865- .section .altinstr_replacement,"ax"
21866+ .section .altinstr_replacement,"a"
21867 2: .byte 0xe9 /* near jump with 32bit immediate */
21868 .long \alt-1b /* offset */ /* or alternatively to alt */
21869 .previous
21870@@ -64,55 +66,26 @@
21871 #endif
21872 .endm
21873
21874-/* Standard copy_to_user with segment limit checking */
21875-ENTRY(copy_to_user)
21876- CFI_STARTPROC
21877- GET_THREAD_INFO(%rax)
21878- movq %rdi,%rcx
21879- addq %rdx,%rcx
21880- jc bad_to_user
21881- cmpq TI_addr_limit(%rax),%rcx
21882- ja bad_to_user
21883- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21884- CFI_ENDPROC
21885-ENDPROC(copy_to_user)
21886-
21887-/* Standard copy_from_user with segment limit checking */
21888-ENTRY(copy_from_user)
21889- CFI_STARTPROC
21890- GET_THREAD_INFO(%rax)
21891- movq %rsi,%rcx
21892- addq %rdx,%rcx
21893- jc bad_from_user
21894- cmpq TI_addr_limit(%rax),%rcx
21895- ja bad_from_user
21896- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21897- CFI_ENDPROC
21898-ENDPROC(copy_from_user)
21899-
21900 ENTRY(copy_user_generic)
21901 CFI_STARTPROC
21902 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21903 CFI_ENDPROC
21904 ENDPROC(copy_user_generic)
21905
21906-ENTRY(__copy_from_user_inatomic)
21907- CFI_STARTPROC
21908- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21909- CFI_ENDPROC
21910-ENDPROC(__copy_from_user_inatomic)
21911-
21912 .section .fixup,"ax"
21913 /* must zero dest */
21914 ENTRY(bad_from_user)
21915 bad_from_user:
21916 CFI_STARTPROC
21917+ testl %edx,%edx
21918+ js bad_to_user
21919 movl %edx,%ecx
21920 xorl %eax,%eax
21921 rep
21922 stosb
21923 bad_to_user:
21924 movl %edx,%eax
21925+ pax_force_retaddr
21926 ret
21927 CFI_ENDPROC
21928 ENDPROC(bad_from_user)
21929@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21930 jz 17f
21931 1: movq (%rsi),%r8
21932 2: movq 1*8(%rsi),%r9
21933-3: movq 2*8(%rsi),%r10
21934+3: movq 2*8(%rsi),%rax
21935 4: movq 3*8(%rsi),%r11
21936 5: movq %r8,(%rdi)
21937 6: movq %r9,1*8(%rdi)
21938-7: movq %r10,2*8(%rdi)
21939+7: movq %rax,2*8(%rdi)
21940 8: movq %r11,3*8(%rdi)
21941 9: movq 4*8(%rsi),%r8
21942 10: movq 5*8(%rsi),%r9
21943-11: movq 6*8(%rsi),%r10
21944+11: movq 6*8(%rsi),%rax
21945 12: movq 7*8(%rsi),%r11
21946 13: movq %r8,4*8(%rdi)
21947 14: movq %r9,5*8(%rdi)
21948-15: movq %r10,6*8(%rdi)
21949+15: movq %rax,6*8(%rdi)
21950 16: movq %r11,7*8(%rdi)
21951 leaq 64(%rsi),%rsi
21952 leaq 64(%rdi),%rdi
21953@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21954 decl %ecx
21955 jnz 21b
21956 23: xor %eax,%eax
21957+ pax_force_retaddr
21958 ret
21959
21960 .section .fixup,"ax"
21961@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
21962 3: rep
21963 movsb
21964 4: xorl %eax,%eax
21965+ pax_force_retaddr
21966 ret
21967
21968 .section .fixup,"ax"
21969diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21970index cb0c112..e3a6895 100644
21971--- a/arch/x86/lib/copy_user_nocache_64.S
21972+++ b/arch/x86/lib/copy_user_nocache_64.S
21973@@ -8,12 +8,14 @@
21974
21975 #include <linux/linkage.h>
21976 #include <asm/dwarf2.h>
21977+#include <asm/alternative-asm.h>
21978
21979 #define FIX_ALIGNMENT 1
21980
21981 #include <asm/current.h>
21982 #include <asm/asm-offsets.h>
21983 #include <asm/thread_info.h>
21984+#include <asm/pgtable.h>
21985
21986 .macro ALIGN_DESTINATION
21987 #ifdef FIX_ALIGNMENT
21988@@ -50,6 +52,15 @@
21989 */
21990 ENTRY(__copy_user_nocache)
21991 CFI_STARTPROC
21992+
21993+#ifdef CONFIG_PAX_MEMORY_UDEREF
21994+ mov $PAX_USER_SHADOW_BASE,%rcx
21995+ cmp %rcx,%rsi
21996+ jae 1f
21997+ add %rcx,%rsi
21998+1:
21999+#endif
22000+
22001 cmpl $8,%edx
22002 jb 20f /* less then 8 bytes, go to byte copy loop */
22003 ALIGN_DESTINATION
22004@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22005 jz 17f
22006 1: movq (%rsi),%r8
22007 2: movq 1*8(%rsi),%r9
22008-3: movq 2*8(%rsi),%r10
22009+3: movq 2*8(%rsi),%rax
22010 4: movq 3*8(%rsi),%r11
22011 5: movnti %r8,(%rdi)
22012 6: movnti %r9,1*8(%rdi)
22013-7: movnti %r10,2*8(%rdi)
22014+7: movnti %rax,2*8(%rdi)
22015 8: movnti %r11,3*8(%rdi)
22016 9: movq 4*8(%rsi),%r8
22017 10: movq 5*8(%rsi),%r9
22018-11: movq 6*8(%rsi),%r10
22019+11: movq 6*8(%rsi),%rax
22020 12: movq 7*8(%rsi),%r11
22021 13: movnti %r8,4*8(%rdi)
22022 14: movnti %r9,5*8(%rdi)
22023-15: movnti %r10,6*8(%rdi)
22024+15: movnti %rax,6*8(%rdi)
22025 16: movnti %r11,7*8(%rdi)
22026 leaq 64(%rsi),%rsi
22027 leaq 64(%rdi),%rdi
22028@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22029 jnz 21b
22030 23: xorl %eax,%eax
22031 sfence
22032+ pax_force_retaddr
22033 ret
22034
22035 .section .fixup,"ax"
22036diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22037index f0dba36..48cb4d6 100644
22038--- a/arch/x86/lib/csum-copy_64.S
22039+++ b/arch/x86/lib/csum-copy_64.S
22040@@ -8,6 +8,7 @@
22041 #include <linux/linkage.h>
22042 #include <asm/dwarf2.h>
22043 #include <asm/errno.h>
22044+#include <asm/alternative-asm.h>
22045
22046 /*
22047 * Checksum copy with exception handling.
22048@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22049 CFI_RESTORE rbp
22050 addq $7*8,%rsp
22051 CFI_ADJUST_CFA_OFFSET -7*8
22052+ pax_force_retaddr 0, 1
22053 ret
22054 CFI_RESTORE_STATE
22055
22056diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22057index 459b58a..9570bc7 100644
22058--- a/arch/x86/lib/csum-wrappers_64.c
22059+++ b/arch/x86/lib/csum-wrappers_64.c
22060@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22061 len -= 2;
22062 }
22063 }
22064- isum = csum_partial_copy_generic((__force const void *)src,
22065+
22066+#ifdef CONFIG_PAX_MEMORY_UDEREF
22067+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22068+ src += PAX_USER_SHADOW_BASE;
22069+#endif
22070+
22071+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22072 dst, len, isum, errp, NULL);
22073 if (unlikely(*errp))
22074 goto out_err;
22075@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22076 }
22077
22078 *errp = 0;
22079- return csum_partial_copy_generic(src, (void __force *)dst,
22080+
22081+#ifdef CONFIG_PAX_MEMORY_UDEREF
22082+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22083+ dst += PAX_USER_SHADOW_BASE;
22084+#endif
22085+
22086+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22087 len, isum, NULL, errp);
22088 }
22089 EXPORT_SYMBOL(csum_partial_copy_to_user);
22090diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22091index 51f1504..ddac4c1 100644
22092--- a/arch/x86/lib/getuser.S
22093+++ b/arch/x86/lib/getuser.S
22094@@ -33,15 +33,38 @@
22095 #include <asm/asm-offsets.h>
22096 #include <asm/thread_info.h>
22097 #include <asm/asm.h>
22098+#include <asm/segment.h>
22099+#include <asm/pgtable.h>
22100+#include <asm/alternative-asm.h>
22101+
22102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22103+#define __copyuser_seg gs;
22104+#else
22105+#define __copyuser_seg
22106+#endif
22107
22108 .text
22109 ENTRY(__get_user_1)
22110 CFI_STARTPROC
22111+
22112+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22113 GET_THREAD_INFO(%_ASM_DX)
22114 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22115 jae bad_get_user
22116-1: movzb (%_ASM_AX),%edx
22117+
22118+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22119+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22120+ cmp %_ASM_DX,%_ASM_AX
22121+ jae 1234f
22122+ add %_ASM_DX,%_ASM_AX
22123+1234:
22124+#endif
22125+
22126+#endif
22127+
22128+1: __copyuser_seg movzb (%_ASM_AX),%edx
22129 xor %eax,%eax
22130+ pax_force_retaddr
22131 ret
22132 CFI_ENDPROC
22133 ENDPROC(__get_user_1)
22134@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22135 ENTRY(__get_user_2)
22136 CFI_STARTPROC
22137 add $1,%_ASM_AX
22138+
22139+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22140 jc bad_get_user
22141 GET_THREAD_INFO(%_ASM_DX)
22142 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22143 jae bad_get_user
22144-2: movzwl -1(%_ASM_AX),%edx
22145+
22146+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22147+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22148+ cmp %_ASM_DX,%_ASM_AX
22149+ jae 1234f
22150+ add %_ASM_DX,%_ASM_AX
22151+1234:
22152+#endif
22153+
22154+#endif
22155+
22156+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22157 xor %eax,%eax
22158+ pax_force_retaddr
22159 ret
22160 CFI_ENDPROC
22161 ENDPROC(__get_user_2)
22162@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22163 ENTRY(__get_user_4)
22164 CFI_STARTPROC
22165 add $3,%_ASM_AX
22166+
22167+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22168 jc bad_get_user
22169 GET_THREAD_INFO(%_ASM_DX)
22170 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22171 jae bad_get_user
22172-3: mov -3(%_ASM_AX),%edx
22173+
22174+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22175+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22176+ cmp %_ASM_DX,%_ASM_AX
22177+ jae 1234f
22178+ add %_ASM_DX,%_ASM_AX
22179+1234:
22180+#endif
22181+
22182+#endif
22183+
22184+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22185 xor %eax,%eax
22186+ pax_force_retaddr
22187 ret
22188 CFI_ENDPROC
22189 ENDPROC(__get_user_4)
22190@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22191 GET_THREAD_INFO(%_ASM_DX)
22192 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22193 jae bad_get_user
22194+
22195+#ifdef CONFIG_PAX_MEMORY_UDEREF
22196+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22197+ cmp %_ASM_DX,%_ASM_AX
22198+ jae 1234f
22199+ add %_ASM_DX,%_ASM_AX
22200+1234:
22201+#endif
22202+
22203 4: movq -7(%_ASM_AX),%_ASM_DX
22204 xor %eax,%eax
22205+ pax_force_retaddr
22206 ret
22207 CFI_ENDPROC
22208 ENDPROC(__get_user_8)
22209@@ -91,6 +152,7 @@ bad_get_user:
22210 CFI_STARTPROC
22211 xor %edx,%edx
22212 mov $(-EFAULT),%_ASM_AX
22213+ pax_force_retaddr
22214 ret
22215 CFI_ENDPROC
22216 END(bad_get_user)
22217diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22218index 05a95e7..326f2fa 100644
22219--- a/arch/x86/lib/iomap_copy_64.S
22220+++ b/arch/x86/lib/iomap_copy_64.S
22221@@ -17,6 +17,7 @@
22222
22223 #include <linux/linkage.h>
22224 #include <asm/dwarf2.h>
22225+#include <asm/alternative-asm.h>
22226
22227 /*
22228 * override generic version in lib/iomap_copy.c
22229@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22230 CFI_STARTPROC
22231 movl %edx,%ecx
22232 rep movsd
22233+ pax_force_retaddr
22234 ret
22235 CFI_ENDPROC
22236 ENDPROC(__iowrite32_copy)
22237diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22238index ad5441e..610e351 100644
22239--- a/arch/x86/lib/memcpy_64.S
22240+++ b/arch/x86/lib/memcpy_64.S
22241@@ -4,6 +4,7 @@
22242
22243 #include <asm/cpufeature.h>
22244 #include <asm/dwarf2.h>
22245+#include <asm/alternative-asm.h>
22246
22247 /*
22248 * memcpy - Copy a memory block.
22249@@ -34,6 +35,7 @@ memcpy_c:
22250 rep movsq
22251 movl %edx, %ecx
22252 rep movsb
22253+ pax_force_retaddr
22254 ret
22255 CFI_ENDPROC
22256 ENDPROC(memcpy_c)
22257@@ -118,6 +120,7 @@ ENTRY(memcpy)
22258 jnz .Lloop_1
22259
22260 .Lend:
22261+ pax_force_retaddr 0, 1
22262 ret
22263 CFI_ENDPROC
22264 ENDPROC(memcpy)
22265@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22266 * It is also a lot simpler. Use this when possible:
22267 */
22268
22269- .section .altinstr_replacement, "ax"
22270+ .section .altinstr_replacement, "a"
22271 1: .byte 0xeb /* jmp <disp8> */
22272 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22273 2:
22274diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22275index 2c59481..7e9ba4e 100644
22276--- a/arch/x86/lib/memset_64.S
22277+++ b/arch/x86/lib/memset_64.S
22278@@ -2,6 +2,7 @@
22279
22280 #include <linux/linkage.h>
22281 #include <asm/dwarf2.h>
22282+#include <asm/alternative-asm.h>
22283
22284 /*
22285 * ISO C memset - set a memory block to a byte value.
22286@@ -28,6 +29,7 @@ memset_c:
22287 movl %r8d,%ecx
22288 rep stosb
22289 movq %r9,%rax
22290+ pax_force_retaddr
22291 ret
22292 CFI_ENDPROC
22293 ENDPROC(memset_c)
22294@@ -35,13 +37,13 @@ ENDPROC(memset_c)
22295 ENTRY(memset)
22296 ENTRY(__memset)
22297 CFI_STARTPROC
22298- movq %rdi,%r10
22299 movq %rdx,%r11
22300
22301 /* expand byte value */
22302 movzbl %sil,%ecx
22303 movabs $0x0101010101010101,%rax
22304 mul %rcx /* with rax, clobbers rdx */
22305+ movq %rdi,%rdx
22306
22307 /* align dst */
22308 movl %edi,%r9d
22309@@ -95,7 +97,8 @@ ENTRY(__memset)
22310 jnz .Lloop_1
22311
22312 .Lende:
22313- movq %r10,%rax
22314+ movq %rdx,%rax
22315+ pax_force_retaddr
22316 ret
22317
22318 CFI_RESTORE_STATE
22319@@ -118,7 +121,7 @@ ENDPROC(__memset)
22320
22321 #include <asm/cpufeature.h>
22322
22323- .section .altinstr_replacement,"ax"
22324+ .section .altinstr_replacement,"a"
22325 1: .byte 0xeb /* jmp <disp8> */
22326 .byte (memset_c - memset) - (2f - 1b) /* offset */
22327 2:
22328diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22329index c9f2d9b..e7fd2c0 100644
22330--- a/arch/x86/lib/mmx_32.c
22331+++ b/arch/x86/lib/mmx_32.c
22332@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22333 {
22334 void *p;
22335 int i;
22336+ unsigned long cr0;
22337
22338 if (unlikely(in_interrupt()))
22339 return __memcpy(to, from, len);
22340@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22341 kernel_fpu_begin();
22342
22343 __asm__ __volatile__ (
22344- "1: prefetch (%0)\n" /* This set is 28 bytes */
22345- " prefetch 64(%0)\n"
22346- " prefetch 128(%0)\n"
22347- " prefetch 192(%0)\n"
22348- " prefetch 256(%0)\n"
22349+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22350+ " prefetch 64(%1)\n"
22351+ " prefetch 128(%1)\n"
22352+ " prefetch 192(%1)\n"
22353+ " prefetch 256(%1)\n"
22354 "2: \n"
22355 ".section .fixup, \"ax\"\n"
22356- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22357+ "3: \n"
22358+
22359+#ifdef CONFIG_PAX_KERNEXEC
22360+ " movl %%cr0, %0\n"
22361+ " movl %0, %%eax\n"
22362+ " andl $0xFFFEFFFF, %%eax\n"
22363+ " movl %%eax, %%cr0\n"
22364+#endif
22365+
22366+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22367+
22368+#ifdef CONFIG_PAX_KERNEXEC
22369+ " movl %0, %%cr0\n"
22370+#endif
22371+
22372 " jmp 2b\n"
22373 ".previous\n"
22374 _ASM_EXTABLE(1b, 3b)
22375- : : "r" (from));
22376+ : "=&r" (cr0) : "r" (from) : "ax");
22377
22378 for ( ; i > 5; i--) {
22379 __asm__ __volatile__ (
22380- "1: prefetch 320(%0)\n"
22381- "2: movq (%0), %%mm0\n"
22382- " movq 8(%0), %%mm1\n"
22383- " movq 16(%0), %%mm2\n"
22384- " movq 24(%0), %%mm3\n"
22385- " movq %%mm0, (%1)\n"
22386- " movq %%mm1, 8(%1)\n"
22387- " movq %%mm2, 16(%1)\n"
22388- " movq %%mm3, 24(%1)\n"
22389- " movq 32(%0), %%mm0\n"
22390- " movq 40(%0), %%mm1\n"
22391- " movq 48(%0), %%mm2\n"
22392- " movq 56(%0), %%mm3\n"
22393- " movq %%mm0, 32(%1)\n"
22394- " movq %%mm1, 40(%1)\n"
22395- " movq %%mm2, 48(%1)\n"
22396- " movq %%mm3, 56(%1)\n"
22397+ "1: prefetch 320(%1)\n"
22398+ "2: movq (%1), %%mm0\n"
22399+ " movq 8(%1), %%mm1\n"
22400+ " movq 16(%1), %%mm2\n"
22401+ " movq 24(%1), %%mm3\n"
22402+ " movq %%mm0, (%2)\n"
22403+ " movq %%mm1, 8(%2)\n"
22404+ " movq %%mm2, 16(%2)\n"
22405+ " movq %%mm3, 24(%2)\n"
22406+ " movq 32(%1), %%mm0\n"
22407+ " movq 40(%1), %%mm1\n"
22408+ " movq 48(%1), %%mm2\n"
22409+ " movq 56(%1), %%mm3\n"
22410+ " movq %%mm0, 32(%2)\n"
22411+ " movq %%mm1, 40(%2)\n"
22412+ " movq %%mm2, 48(%2)\n"
22413+ " movq %%mm3, 56(%2)\n"
22414 ".section .fixup, \"ax\"\n"
22415- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22416+ "3:\n"
22417+
22418+#ifdef CONFIG_PAX_KERNEXEC
22419+ " movl %%cr0, %0\n"
22420+ " movl %0, %%eax\n"
22421+ " andl $0xFFFEFFFF, %%eax\n"
22422+ " movl %%eax, %%cr0\n"
22423+#endif
22424+
22425+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22426+
22427+#ifdef CONFIG_PAX_KERNEXEC
22428+ " movl %0, %%cr0\n"
22429+#endif
22430+
22431 " jmp 2b\n"
22432 ".previous\n"
22433 _ASM_EXTABLE(1b, 3b)
22434- : : "r" (from), "r" (to) : "memory");
22435+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22436
22437 from += 64;
22438 to += 64;
22439@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22440 static void fast_copy_page(void *to, void *from)
22441 {
22442 int i;
22443+ unsigned long cr0;
22444
22445 kernel_fpu_begin();
22446
22447@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22448 * but that is for later. -AV
22449 */
22450 __asm__ __volatile__(
22451- "1: prefetch (%0)\n"
22452- " prefetch 64(%0)\n"
22453- " prefetch 128(%0)\n"
22454- " prefetch 192(%0)\n"
22455- " prefetch 256(%0)\n"
22456+ "1: prefetch (%1)\n"
22457+ " prefetch 64(%1)\n"
22458+ " prefetch 128(%1)\n"
22459+ " prefetch 192(%1)\n"
22460+ " prefetch 256(%1)\n"
22461 "2: \n"
22462 ".section .fixup, \"ax\"\n"
22463- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22464+ "3: \n"
22465+
22466+#ifdef CONFIG_PAX_KERNEXEC
22467+ " movl %%cr0, %0\n"
22468+ " movl %0, %%eax\n"
22469+ " andl $0xFFFEFFFF, %%eax\n"
22470+ " movl %%eax, %%cr0\n"
22471+#endif
22472+
22473+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22474+
22475+#ifdef CONFIG_PAX_KERNEXEC
22476+ " movl %0, %%cr0\n"
22477+#endif
22478+
22479 " jmp 2b\n"
22480 ".previous\n"
22481- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22482+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22483
22484 for (i = 0; i < (4096-320)/64; i++) {
22485 __asm__ __volatile__ (
22486- "1: prefetch 320(%0)\n"
22487- "2: movq (%0), %%mm0\n"
22488- " movntq %%mm0, (%1)\n"
22489- " movq 8(%0), %%mm1\n"
22490- " movntq %%mm1, 8(%1)\n"
22491- " movq 16(%0), %%mm2\n"
22492- " movntq %%mm2, 16(%1)\n"
22493- " movq 24(%0), %%mm3\n"
22494- " movntq %%mm3, 24(%1)\n"
22495- " movq 32(%0), %%mm4\n"
22496- " movntq %%mm4, 32(%1)\n"
22497- " movq 40(%0), %%mm5\n"
22498- " movntq %%mm5, 40(%1)\n"
22499- " movq 48(%0), %%mm6\n"
22500- " movntq %%mm6, 48(%1)\n"
22501- " movq 56(%0), %%mm7\n"
22502- " movntq %%mm7, 56(%1)\n"
22503+ "1: prefetch 320(%1)\n"
22504+ "2: movq (%1), %%mm0\n"
22505+ " movntq %%mm0, (%2)\n"
22506+ " movq 8(%1), %%mm1\n"
22507+ " movntq %%mm1, 8(%2)\n"
22508+ " movq 16(%1), %%mm2\n"
22509+ " movntq %%mm2, 16(%2)\n"
22510+ " movq 24(%1), %%mm3\n"
22511+ " movntq %%mm3, 24(%2)\n"
22512+ " movq 32(%1), %%mm4\n"
22513+ " movntq %%mm4, 32(%2)\n"
22514+ " movq 40(%1), %%mm5\n"
22515+ " movntq %%mm5, 40(%2)\n"
22516+ " movq 48(%1), %%mm6\n"
22517+ " movntq %%mm6, 48(%2)\n"
22518+ " movq 56(%1), %%mm7\n"
22519+ " movntq %%mm7, 56(%2)\n"
22520 ".section .fixup, \"ax\"\n"
22521- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22522+ "3:\n"
22523+
22524+#ifdef CONFIG_PAX_KERNEXEC
22525+ " movl %%cr0, %0\n"
22526+ " movl %0, %%eax\n"
22527+ " andl $0xFFFEFFFF, %%eax\n"
22528+ " movl %%eax, %%cr0\n"
22529+#endif
22530+
22531+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22532+
22533+#ifdef CONFIG_PAX_KERNEXEC
22534+ " movl %0, %%cr0\n"
22535+#endif
22536+
22537 " jmp 2b\n"
22538 ".previous\n"
22539- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22540+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22541
22542 from += 64;
22543 to += 64;
22544@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22545 static void fast_copy_page(void *to, void *from)
22546 {
22547 int i;
22548+ unsigned long cr0;
22549
22550 kernel_fpu_begin();
22551
22552 __asm__ __volatile__ (
22553- "1: prefetch (%0)\n"
22554- " prefetch 64(%0)\n"
22555- " prefetch 128(%0)\n"
22556- " prefetch 192(%0)\n"
22557- " prefetch 256(%0)\n"
22558+ "1: prefetch (%1)\n"
22559+ " prefetch 64(%1)\n"
22560+ " prefetch 128(%1)\n"
22561+ " prefetch 192(%1)\n"
22562+ " prefetch 256(%1)\n"
22563 "2: \n"
22564 ".section .fixup, \"ax\"\n"
22565- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22566+ "3: \n"
22567+
22568+#ifdef CONFIG_PAX_KERNEXEC
22569+ " movl %%cr0, %0\n"
22570+ " movl %0, %%eax\n"
22571+ " andl $0xFFFEFFFF, %%eax\n"
22572+ " movl %%eax, %%cr0\n"
22573+#endif
22574+
22575+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22576+
22577+#ifdef CONFIG_PAX_KERNEXEC
22578+ " movl %0, %%cr0\n"
22579+#endif
22580+
22581 " jmp 2b\n"
22582 ".previous\n"
22583- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22584+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22585
22586 for (i = 0; i < 4096/64; i++) {
22587 __asm__ __volatile__ (
22588- "1: prefetch 320(%0)\n"
22589- "2: movq (%0), %%mm0\n"
22590- " movq 8(%0), %%mm1\n"
22591- " movq 16(%0), %%mm2\n"
22592- " movq 24(%0), %%mm3\n"
22593- " movq %%mm0, (%1)\n"
22594- " movq %%mm1, 8(%1)\n"
22595- " movq %%mm2, 16(%1)\n"
22596- " movq %%mm3, 24(%1)\n"
22597- " movq 32(%0), %%mm0\n"
22598- " movq 40(%0), %%mm1\n"
22599- " movq 48(%0), %%mm2\n"
22600- " movq 56(%0), %%mm3\n"
22601- " movq %%mm0, 32(%1)\n"
22602- " movq %%mm1, 40(%1)\n"
22603- " movq %%mm2, 48(%1)\n"
22604- " movq %%mm3, 56(%1)\n"
22605+ "1: prefetch 320(%1)\n"
22606+ "2: movq (%1), %%mm0\n"
22607+ " movq 8(%1), %%mm1\n"
22608+ " movq 16(%1), %%mm2\n"
22609+ " movq 24(%1), %%mm3\n"
22610+ " movq %%mm0, (%2)\n"
22611+ " movq %%mm1, 8(%2)\n"
22612+ " movq %%mm2, 16(%2)\n"
22613+ " movq %%mm3, 24(%2)\n"
22614+ " movq 32(%1), %%mm0\n"
22615+ " movq 40(%1), %%mm1\n"
22616+ " movq 48(%1), %%mm2\n"
22617+ " movq 56(%1), %%mm3\n"
22618+ " movq %%mm0, 32(%2)\n"
22619+ " movq %%mm1, 40(%2)\n"
22620+ " movq %%mm2, 48(%2)\n"
22621+ " movq %%mm3, 56(%2)\n"
22622 ".section .fixup, \"ax\"\n"
22623- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22624+ "3:\n"
22625+
22626+#ifdef CONFIG_PAX_KERNEXEC
22627+ " movl %%cr0, %0\n"
22628+ " movl %0, %%eax\n"
22629+ " andl $0xFFFEFFFF, %%eax\n"
22630+ " movl %%eax, %%cr0\n"
22631+#endif
22632+
22633+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22634+
22635+#ifdef CONFIG_PAX_KERNEXEC
22636+ " movl %0, %%cr0\n"
22637+#endif
22638+
22639 " jmp 2b\n"
22640 ".previous\n"
22641 _ASM_EXTABLE(1b, 3b)
22642- : : "r" (from), "r" (to) : "memory");
22643+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22644
22645 from += 64;
22646 to += 64;
22647diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22648index 69fa106..adda88b 100644
22649--- a/arch/x86/lib/msr-reg.S
22650+++ b/arch/x86/lib/msr-reg.S
22651@@ -3,6 +3,7 @@
22652 #include <asm/dwarf2.h>
22653 #include <asm/asm.h>
22654 #include <asm/msr.h>
22655+#include <asm/alternative-asm.h>
22656
22657 #ifdef CONFIG_X86_64
22658 /*
22659@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22660 CFI_STARTPROC
22661 pushq_cfi %rbx
22662 pushq_cfi %rbp
22663- movq %rdi, %r10 /* Save pointer */
22664+ movq %rdi, %r9 /* Save pointer */
22665 xorl %r11d, %r11d /* Return value */
22666 movl (%rdi), %eax
22667 movl 4(%rdi), %ecx
22668@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22669 movl 28(%rdi), %edi
22670 CFI_REMEMBER_STATE
22671 1: \op
22672-2: movl %eax, (%r10)
22673+2: movl %eax, (%r9)
22674 movl %r11d, %eax /* Return value */
22675- movl %ecx, 4(%r10)
22676- movl %edx, 8(%r10)
22677- movl %ebx, 12(%r10)
22678- movl %ebp, 20(%r10)
22679- movl %esi, 24(%r10)
22680- movl %edi, 28(%r10)
22681+ movl %ecx, 4(%r9)
22682+ movl %edx, 8(%r9)
22683+ movl %ebx, 12(%r9)
22684+ movl %ebp, 20(%r9)
22685+ movl %esi, 24(%r9)
22686+ movl %edi, 28(%r9)
22687 popq_cfi %rbp
22688 popq_cfi %rbx
22689+ pax_force_retaddr
22690 ret
22691 3:
22692 CFI_RESTORE_STATE
22693diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22694index 36b0d15..d381858 100644
22695--- a/arch/x86/lib/putuser.S
22696+++ b/arch/x86/lib/putuser.S
22697@@ -15,7 +15,9 @@
22698 #include <asm/thread_info.h>
22699 #include <asm/errno.h>
22700 #include <asm/asm.h>
22701-
22702+#include <asm/segment.h>
22703+#include <asm/pgtable.h>
22704+#include <asm/alternative-asm.h>
22705
22706 /*
22707 * __put_user_X
22708@@ -29,52 +31,119 @@
22709 * as they get called from within inline assembly.
22710 */
22711
22712-#define ENTER CFI_STARTPROC ; \
22713- GET_THREAD_INFO(%_ASM_BX)
22714-#define EXIT ret ; \
22715+#define ENTER CFI_STARTPROC
22716+#define EXIT pax_force_retaddr; ret ; \
22717 CFI_ENDPROC
22718
22719+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22720+#define _DEST %_ASM_CX,%_ASM_BX
22721+#else
22722+#define _DEST %_ASM_CX
22723+#endif
22724+
22725+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22726+#define __copyuser_seg gs;
22727+#else
22728+#define __copyuser_seg
22729+#endif
22730+
22731 .text
22732 ENTRY(__put_user_1)
22733 ENTER
22734+
22735+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22736+ GET_THREAD_INFO(%_ASM_BX)
22737 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22738 jae bad_put_user
22739-1: movb %al,(%_ASM_CX)
22740+
22741+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22742+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22743+ cmp %_ASM_BX,%_ASM_CX
22744+ jb 1234f
22745+ xor %ebx,%ebx
22746+1234:
22747+#endif
22748+
22749+#endif
22750+
22751+1: __copyuser_seg movb %al,(_DEST)
22752 xor %eax,%eax
22753 EXIT
22754 ENDPROC(__put_user_1)
22755
22756 ENTRY(__put_user_2)
22757 ENTER
22758+
22759+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22760+ GET_THREAD_INFO(%_ASM_BX)
22761 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22762 sub $1,%_ASM_BX
22763 cmp %_ASM_BX,%_ASM_CX
22764 jae bad_put_user
22765-2: movw %ax,(%_ASM_CX)
22766+
22767+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22768+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22769+ cmp %_ASM_BX,%_ASM_CX
22770+ jb 1234f
22771+ xor %ebx,%ebx
22772+1234:
22773+#endif
22774+
22775+#endif
22776+
22777+2: __copyuser_seg movw %ax,(_DEST)
22778 xor %eax,%eax
22779 EXIT
22780 ENDPROC(__put_user_2)
22781
22782 ENTRY(__put_user_4)
22783 ENTER
22784+
22785+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22786+ GET_THREAD_INFO(%_ASM_BX)
22787 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22788 sub $3,%_ASM_BX
22789 cmp %_ASM_BX,%_ASM_CX
22790 jae bad_put_user
22791-3: movl %eax,(%_ASM_CX)
22792+
22793+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22794+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22795+ cmp %_ASM_BX,%_ASM_CX
22796+ jb 1234f
22797+ xor %ebx,%ebx
22798+1234:
22799+#endif
22800+
22801+#endif
22802+
22803+3: __copyuser_seg movl %eax,(_DEST)
22804 xor %eax,%eax
22805 EXIT
22806 ENDPROC(__put_user_4)
22807
22808 ENTRY(__put_user_8)
22809 ENTER
22810+
22811+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22812+ GET_THREAD_INFO(%_ASM_BX)
22813 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22814 sub $7,%_ASM_BX
22815 cmp %_ASM_BX,%_ASM_CX
22816 jae bad_put_user
22817-4: mov %_ASM_AX,(%_ASM_CX)
22818+
22819+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22820+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22821+ cmp %_ASM_BX,%_ASM_CX
22822+ jb 1234f
22823+ xor %ebx,%ebx
22824+1234:
22825+#endif
22826+
22827+#endif
22828+
22829+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22830 #ifdef CONFIG_X86_32
22831-5: movl %edx,4(%_ASM_CX)
22832+5: __copyuser_seg movl %edx,4(_DEST)
22833 #endif
22834 xor %eax,%eax
22835 EXIT
22836diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22837index 05ea55f..f81311a 100644
22838--- a/arch/x86/lib/rwlock_64.S
22839+++ b/arch/x86/lib/rwlock_64.S
22840@@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
22841 LOCK_PREFIX
22842 subl $RW_LOCK_BIAS,(%rdi)
22843 jnz __write_lock_failed
22844+ pax_force_retaddr
22845 ret
22846 CFI_ENDPROC
22847 END(__write_lock_failed)
22848@@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
22849 LOCK_PREFIX
22850 decl (%rdi)
22851 js __read_lock_failed
22852+ pax_force_retaddr
22853 ret
22854 CFI_ENDPROC
22855 END(__read_lock_failed)
22856diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
22857index 15acecf..f768b10 100644
22858--- a/arch/x86/lib/rwsem_64.S
22859+++ b/arch/x86/lib/rwsem_64.S
22860@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
22861 call rwsem_down_read_failed
22862 popq %rdx
22863 restore_common_regs
22864+ pax_force_retaddr
22865 ret
22866 ENDPROC(call_rwsem_down_read_failed)
22867
22868@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
22869 movq %rax,%rdi
22870 call rwsem_down_write_failed
22871 restore_common_regs
22872+ pax_force_retaddr
22873 ret
22874 ENDPROC(call_rwsem_down_write_failed)
22875
22876@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
22877 movq %rax,%rdi
22878 call rwsem_wake
22879 restore_common_regs
22880-1: ret
22881+1: pax_force_retaddr
22882+ ret
22883 ENDPROC(call_rwsem_wake)
22884
22885 /* Fix up special calling conventions */
22886@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
22887 call rwsem_downgrade_wake
22888 popq %rdx
22889 restore_common_regs
22890+ pax_force_retaddr
22891 ret
22892 ENDPROC(call_rwsem_downgrade_wake)
22893diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22894index bf9a7d5..fb06ab5 100644
22895--- a/arch/x86/lib/thunk_64.S
22896+++ b/arch/x86/lib/thunk_64.S
22897@@ -10,7 +10,8 @@
22898 #include <asm/dwarf2.h>
22899 #include <asm/calling.h>
22900 #include <asm/rwlock.h>
22901-
22902+ #include <asm/alternative-asm.h>
22903+
22904 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22905 .macro thunk name,func
22906 .globl \name
22907@@ -70,6 +71,7 @@
22908 SAVE_ARGS
22909 restore:
22910 RESTORE_ARGS
22911+ pax_force_retaddr
22912 ret
22913 CFI_ENDPROC
22914
22915@@ -77,5 +79,6 @@ restore:
22916 SAVE_ARGS
22917 restore_norax:
22918 RESTORE_ARGS 1
22919+ pax_force_retaddr
22920 ret
22921 CFI_ENDPROC
22922diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22923index 1f118d4..ec4a953 100644
22924--- a/arch/x86/lib/usercopy_32.c
22925+++ b/arch/x86/lib/usercopy_32.c
22926@@ -43,7 +43,7 @@ do { \
22927 __asm__ __volatile__( \
22928 " testl %1,%1\n" \
22929 " jz 2f\n" \
22930- "0: lodsb\n" \
22931+ "0: "__copyuser_seg"lodsb\n" \
22932 " stosb\n" \
22933 " testb %%al,%%al\n" \
22934 " jz 1f\n" \
22935@@ -128,10 +128,12 @@ do { \
22936 int __d0; \
22937 might_fault(); \
22938 __asm__ __volatile__( \
22939+ __COPYUSER_SET_ES \
22940 "0: rep; stosl\n" \
22941 " movl %2,%0\n" \
22942 "1: rep; stosb\n" \
22943 "2:\n" \
22944+ __COPYUSER_RESTORE_ES \
22945 ".section .fixup,\"ax\"\n" \
22946 "3: lea 0(%2,%0,4),%0\n" \
22947 " jmp 2b\n" \
22948@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22949 might_fault();
22950
22951 __asm__ __volatile__(
22952+ __COPYUSER_SET_ES
22953 " testl %0, %0\n"
22954 " jz 3f\n"
22955 " andl %0,%%ecx\n"
22956@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22957 " subl %%ecx,%0\n"
22958 " addl %0,%%eax\n"
22959 "1:\n"
22960+ __COPYUSER_RESTORE_ES
22961 ".section .fixup,\"ax\"\n"
22962 "2: xorl %%eax,%%eax\n"
22963 " jmp 1b\n"
22964@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22965
22966 #ifdef CONFIG_X86_INTEL_USERCOPY
22967 static unsigned long
22968-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22969+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22970 {
22971 int d0, d1;
22972 __asm__ __volatile__(
22973@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22974 " .align 2,0x90\n"
22975 "3: movl 0(%4), %%eax\n"
22976 "4: movl 4(%4), %%edx\n"
22977- "5: movl %%eax, 0(%3)\n"
22978- "6: movl %%edx, 4(%3)\n"
22979+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22980+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22981 "7: movl 8(%4), %%eax\n"
22982 "8: movl 12(%4),%%edx\n"
22983- "9: movl %%eax, 8(%3)\n"
22984- "10: movl %%edx, 12(%3)\n"
22985+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22986+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22987 "11: movl 16(%4), %%eax\n"
22988 "12: movl 20(%4), %%edx\n"
22989- "13: movl %%eax, 16(%3)\n"
22990- "14: movl %%edx, 20(%3)\n"
22991+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22992+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22993 "15: movl 24(%4), %%eax\n"
22994 "16: movl 28(%4), %%edx\n"
22995- "17: movl %%eax, 24(%3)\n"
22996- "18: movl %%edx, 28(%3)\n"
22997+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22998+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22999 "19: movl 32(%4), %%eax\n"
23000 "20: movl 36(%4), %%edx\n"
23001- "21: movl %%eax, 32(%3)\n"
23002- "22: movl %%edx, 36(%3)\n"
23003+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23004+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23005 "23: movl 40(%4), %%eax\n"
23006 "24: movl 44(%4), %%edx\n"
23007- "25: movl %%eax, 40(%3)\n"
23008- "26: movl %%edx, 44(%3)\n"
23009+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23010+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23011 "27: movl 48(%4), %%eax\n"
23012 "28: movl 52(%4), %%edx\n"
23013- "29: movl %%eax, 48(%3)\n"
23014- "30: movl %%edx, 52(%3)\n"
23015+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23016+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23017 "31: movl 56(%4), %%eax\n"
23018 "32: movl 60(%4), %%edx\n"
23019- "33: movl %%eax, 56(%3)\n"
23020- "34: movl %%edx, 60(%3)\n"
23021+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23022+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23023 " addl $-64, %0\n"
23024 " addl $64, %4\n"
23025 " addl $64, %3\n"
23026@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23027 " shrl $2, %0\n"
23028 " andl $3, %%eax\n"
23029 " cld\n"
23030+ __COPYUSER_SET_ES
23031 "99: rep; movsl\n"
23032 "36: movl %%eax, %0\n"
23033 "37: rep; movsb\n"
23034 "100:\n"
23035+ __COPYUSER_RESTORE_ES
23036+ ".section .fixup,\"ax\"\n"
23037+ "101: lea 0(%%eax,%0,4),%0\n"
23038+ " jmp 100b\n"
23039+ ".previous\n"
23040+ ".section __ex_table,\"a\"\n"
23041+ " .align 4\n"
23042+ " .long 1b,100b\n"
23043+ " .long 2b,100b\n"
23044+ " .long 3b,100b\n"
23045+ " .long 4b,100b\n"
23046+ " .long 5b,100b\n"
23047+ " .long 6b,100b\n"
23048+ " .long 7b,100b\n"
23049+ " .long 8b,100b\n"
23050+ " .long 9b,100b\n"
23051+ " .long 10b,100b\n"
23052+ " .long 11b,100b\n"
23053+ " .long 12b,100b\n"
23054+ " .long 13b,100b\n"
23055+ " .long 14b,100b\n"
23056+ " .long 15b,100b\n"
23057+ " .long 16b,100b\n"
23058+ " .long 17b,100b\n"
23059+ " .long 18b,100b\n"
23060+ " .long 19b,100b\n"
23061+ " .long 20b,100b\n"
23062+ " .long 21b,100b\n"
23063+ " .long 22b,100b\n"
23064+ " .long 23b,100b\n"
23065+ " .long 24b,100b\n"
23066+ " .long 25b,100b\n"
23067+ " .long 26b,100b\n"
23068+ " .long 27b,100b\n"
23069+ " .long 28b,100b\n"
23070+ " .long 29b,100b\n"
23071+ " .long 30b,100b\n"
23072+ " .long 31b,100b\n"
23073+ " .long 32b,100b\n"
23074+ " .long 33b,100b\n"
23075+ " .long 34b,100b\n"
23076+ " .long 35b,100b\n"
23077+ " .long 36b,100b\n"
23078+ " .long 37b,100b\n"
23079+ " .long 99b,101b\n"
23080+ ".previous"
23081+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23082+ : "1"(to), "2"(from), "0"(size)
23083+ : "eax", "edx", "memory");
23084+ return size;
23085+}
23086+
23087+static unsigned long
23088+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23089+{
23090+ int d0, d1;
23091+ __asm__ __volatile__(
23092+ " .align 2,0x90\n"
23093+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23094+ " cmpl $67, %0\n"
23095+ " jbe 3f\n"
23096+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23097+ " .align 2,0x90\n"
23098+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23099+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23100+ "5: movl %%eax, 0(%3)\n"
23101+ "6: movl %%edx, 4(%3)\n"
23102+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23103+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23104+ "9: movl %%eax, 8(%3)\n"
23105+ "10: movl %%edx, 12(%3)\n"
23106+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23107+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23108+ "13: movl %%eax, 16(%3)\n"
23109+ "14: movl %%edx, 20(%3)\n"
23110+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23111+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23112+ "17: movl %%eax, 24(%3)\n"
23113+ "18: movl %%edx, 28(%3)\n"
23114+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23115+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23116+ "21: movl %%eax, 32(%3)\n"
23117+ "22: movl %%edx, 36(%3)\n"
23118+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23119+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23120+ "25: movl %%eax, 40(%3)\n"
23121+ "26: movl %%edx, 44(%3)\n"
23122+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23123+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23124+ "29: movl %%eax, 48(%3)\n"
23125+ "30: movl %%edx, 52(%3)\n"
23126+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23127+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23128+ "33: movl %%eax, 56(%3)\n"
23129+ "34: movl %%edx, 60(%3)\n"
23130+ " addl $-64, %0\n"
23131+ " addl $64, %4\n"
23132+ " addl $64, %3\n"
23133+ " cmpl $63, %0\n"
23134+ " ja 1b\n"
23135+ "35: movl %0, %%eax\n"
23136+ " shrl $2, %0\n"
23137+ " andl $3, %%eax\n"
23138+ " cld\n"
23139+ "99: rep; "__copyuser_seg" movsl\n"
23140+ "36: movl %%eax, %0\n"
23141+ "37: rep; "__copyuser_seg" movsb\n"
23142+ "100:\n"
23143 ".section .fixup,\"ax\"\n"
23144 "101: lea 0(%%eax,%0,4),%0\n"
23145 " jmp 100b\n"
23146@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23147 int d0, d1;
23148 __asm__ __volatile__(
23149 " .align 2,0x90\n"
23150- "0: movl 32(%4), %%eax\n"
23151+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23152 " cmpl $67, %0\n"
23153 " jbe 2f\n"
23154- "1: movl 64(%4), %%eax\n"
23155+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23156 " .align 2,0x90\n"
23157- "2: movl 0(%4), %%eax\n"
23158- "21: movl 4(%4), %%edx\n"
23159+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23160+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23161 " movl %%eax, 0(%3)\n"
23162 " movl %%edx, 4(%3)\n"
23163- "3: movl 8(%4), %%eax\n"
23164- "31: movl 12(%4),%%edx\n"
23165+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23166+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23167 " movl %%eax, 8(%3)\n"
23168 " movl %%edx, 12(%3)\n"
23169- "4: movl 16(%4), %%eax\n"
23170- "41: movl 20(%4), %%edx\n"
23171+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23172+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23173 " movl %%eax, 16(%3)\n"
23174 " movl %%edx, 20(%3)\n"
23175- "10: movl 24(%4), %%eax\n"
23176- "51: movl 28(%4), %%edx\n"
23177+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23178+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23179 " movl %%eax, 24(%3)\n"
23180 " movl %%edx, 28(%3)\n"
23181- "11: movl 32(%4), %%eax\n"
23182- "61: movl 36(%4), %%edx\n"
23183+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23184+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23185 " movl %%eax, 32(%3)\n"
23186 " movl %%edx, 36(%3)\n"
23187- "12: movl 40(%4), %%eax\n"
23188- "71: movl 44(%4), %%edx\n"
23189+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23190+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23191 " movl %%eax, 40(%3)\n"
23192 " movl %%edx, 44(%3)\n"
23193- "13: movl 48(%4), %%eax\n"
23194- "81: movl 52(%4), %%edx\n"
23195+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23196+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23197 " movl %%eax, 48(%3)\n"
23198 " movl %%edx, 52(%3)\n"
23199- "14: movl 56(%4), %%eax\n"
23200- "91: movl 60(%4), %%edx\n"
23201+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23202+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23203 " movl %%eax, 56(%3)\n"
23204 " movl %%edx, 60(%3)\n"
23205 " addl $-64, %0\n"
23206@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23207 " shrl $2, %0\n"
23208 " andl $3, %%eax\n"
23209 " cld\n"
23210- "6: rep; movsl\n"
23211+ "6: rep; "__copyuser_seg" movsl\n"
23212 " movl %%eax,%0\n"
23213- "7: rep; movsb\n"
23214+ "7: rep; "__copyuser_seg" movsb\n"
23215 "8:\n"
23216 ".section .fixup,\"ax\"\n"
23217 "9: lea 0(%%eax,%0,4),%0\n"
23218@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23219
23220 __asm__ __volatile__(
23221 " .align 2,0x90\n"
23222- "0: movl 32(%4), %%eax\n"
23223+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23224 " cmpl $67, %0\n"
23225 " jbe 2f\n"
23226- "1: movl 64(%4), %%eax\n"
23227+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23228 " .align 2,0x90\n"
23229- "2: movl 0(%4), %%eax\n"
23230- "21: movl 4(%4), %%edx\n"
23231+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23232+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23233 " movnti %%eax, 0(%3)\n"
23234 " movnti %%edx, 4(%3)\n"
23235- "3: movl 8(%4), %%eax\n"
23236- "31: movl 12(%4),%%edx\n"
23237+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23238+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23239 " movnti %%eax, 8(%3)\n"
23240 " movnti %%edx, 12(%3)\n"
23241- "4: movl 16(%4), %%eax\n"
23242- "41: movl 20(%4), %%edx\n"
23243+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23244+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23245 " movnti %%eax, 16(%3)\n"
23246 " movnti %%edx, 20(%3)\n"
23247- "10: movl 24(%4), %%eax\n"
23248- "51: movl 28(%4), %%edx\n"
23249+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23250+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23251 " movnti %%eax, 24(%3)\n"
23252 " movnti %%edx, 28(%3)\n"
23253- "11: movl 32(%4), %%eax\n"
23254- "61: movl 36(%4), %%edx\n"
23255+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23256+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23257 " movnti %%eax, 32(%3)\n"
23258 " movnti %%edx, 36(%3)\n"
23259- "12: movl 40(%4), %%eax\n"
23260- "71: movl 44(%4), %%edx\n"
23261+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23262+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23263 " movnti %%eax, 40(%3)\n"
23264 " movnti %%edx, 44(%3)\n"
23265- "13: movl 48(%4), %%eax\n"
23266- "81: movl 52(%4), %%edx\n"
23267+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23268+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23269 " movnti %%eax, 48(%3)\n"
23270 " movnti %%edx, 52(%3)\n"
23271- "14: movl 56(%4), %%eax\n"
23272- "91: movl 60(%4), %%edx\n"
23273+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23274+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23275 " movnti %%eax, 56(%3)\n"
23276 " movnti %%edx, 60(%3)\n"
23277 " addl $-64, %0\n"
23278@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23279 " shrl $2, %0\n"
23280 " andl $3, %%eax\n"
23281 " cld\n"
23282- "6: rep; movsl\n"
23283+ "6: rep; "__copyuser_seg" movsl\n"
23284 " movl %%eax,%0\n"
23285- "7: rep; movsb\n"
23286+ "7: rep; "__copyuser_seg" movsb\n"
23287 "8:\n"
23288 ".section .fixup,\"ax\"\n"
23289 "9: lea 0(%%eax,%0,4),%0\n"
23290@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23291
23292 __asm__ __volatile__(
23293 " .align 2,0x90\n"
23294- "0: movl 32(%4), %%eax\n"
23295+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23296 " cmpl $67, %0\n"
23297 " jbe 2f\n"
23298- "1: movl 64(%4), %%eax\n"
23299+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23300 " .align 2,0x90\n"
23301- "2: movl 0(%4), %%eax\n"
23302- "21: movl 4(%4), %%edx\n"
23303+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23304+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23305 " movnti %%eax, 0(%3)\n"
23306 " movnti %%edx, 4(%3)\n"
23307- "3: movl 8(%4), %%eax\n"
23308- "31: movl 12(%4),%%edx\n"
23309+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23310+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23311 " movnti %%eax, 8(%3)\n"
23312 " movnti %%edx, 12(%3)\n"
23313- "4: movl 16(%4), %%eax\n"
23314- "41: movl 20(%4), %%edx\n"
23315+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23316+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23317 " movnti %%eax, 16(%3)\n"
23318 " movnti %%edx, 20(%3)\n"
23319- "10: movl 24(%4), %%eax\n"
23320- "51: movl 28(%4), %%edx\n"
23321+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23322+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23323 " movnti %%eax, 24(%3)\n"
23324 " movnti %%edx, 28(%3)\n"
23325- "11: movl 32(%4), %%eax\n"
23326- "61: movl 36(%4), %%edx\n"
23327+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23328+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23329 " movnti %%eax, 32(%3)\n"
23330 " movnti %%edx, 36(%3)\n"
23331- "12: movl 40(%4), %%eax\n"
23332- "71: movl 44(%4), %%edx\n"
23333+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23334+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23335 " movnti %%eax, 40(%3)\n"
23336 " movnti %%edx, 44(%3)\n"
23337- "13: movl 48(%4), %%eax\n"
23338- "81: movl 52(%4), %%edx\n"
23339+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23340+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23341 " movnti %%eax, 48(%3)\n"
23342 " movnti %%edx, 52(%3)\n"
23343- "14: movl 56(%4), %%eax\n"
23344- "91: movl 60(%4), %%edx\n"
23345+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23346+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23347 " movnti %%eax, 56(%3)\n"
23348 " movnti %%edx, 60(%3)\n"
23349 " addl $-64, %0\n"
23350@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23351 " shrl $2, %0\n"
23352 " andl $3, %%eax\n"
23353 " cld\n"
23354- "6: rep; movsl\n"
23355+ "6: rep; "__copyuser_seg" movsl\n"
23356 " movl %%eax,%0\n"
23357- "7: rep; movsb\n"
23358+ "7: rep; "__copyuser_seg" movsb\n"
23359 "8:\n"
23360 ".section .fixup,\"ax\"\n"
23361 "9: lea 0(%%eax,%0,4),%0\n"
23362@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23363 */
23364 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23365 unsigned long size);
23366-unsigned long __copy_user_intel(void __user *to, const void *from,
23367+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23368+ unsigned long size);
23369+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23370 unsigned long size);
23371 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23372 const void __user *from, unsigned long size);
23373 #endif /* CONFIG_X86_INTEL_USERCOPY */
23374
23375 /* Generic arbitrary sized copy. */
23376-#define __copy_user(to, from, size) \
23377+#define __copy_user(to, from, size, prefix, set, restore) \
23378 do { \
23379 int __d0, __d1, __d2; \
23380 __asm__ __volatile__( \
23381+ set \
23382 " cmp $7,%0\n" \
23383 " jbe 1f\n" \
23384 " movl %1,%0\n" \
23385 " negl %0\n" \
23386 " andl $7,%0\n" \
23387 " subl %0,%3\n" \
23388- "4: rep; movsb\n" \
23389+ "4: rep; "prefix"movsb\n" \
23390 " movl %3,%0\n" \
23391 " shrl $2,%0\n" \
23392 " andl $3,%3\n" \
23393 " .align 2,0x90\n" \
23394- "0: rep; movsl\n" \
23395+ "0: rep; "prefix"movsl\n" \
23396 " movl %3,%0\n" \
23397- "1: rep; movsb\n" \
23398+ "1: rep; "prefix"movsb\n" \
23399 "2:\n" \
23400+ restore \
23401 ".section .fixup,\"ax\"\n" \
23402 "5: addl %3,%0\n" \
23403 " jmp 2b\n" \
23404@@ -682,14 +799,14 @@ do { \
23405 " negl %0\n" \
23406 " andl $7,%0\n" \
23407 " subl %0,%3\n" \
23408- "4: rep; movsb\n" \
23409+ "4: rep; "__copyuser_seg"movsb\n" \
23410 " movl %3,%0\n" \
23411 " shrl $2,%0\n" \
23412 " andl $3,%3\n" \
23413 " .align 2,0x90\n" \
23414- "0: rep; movsl\n" \
23415+ "0: rep; "__copyuser_seg"movsl\n" \
23416 " movl %3,%0\n" \
23417- "1: rep; movsb\n" \
23418+ "1: rep; "__copyuser_seg"movsb\n" \
23419 "2:\n" \
23420 ".section .fixup,\"ax\"\n" \
23421 "5: addl %3,%0\n" \
23422@@ -775,9 +892,9 @@ survive:
23423 }
23424 #endif
23425 if (movsl_is_ok(to, from, n))
23426- __copy_user(to, from, n);
23427+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23428 else
23429- n = __copy_user_intel(to, from, n);
23430+ n = __generic_copy_to_user_intel(to, from, n);
23431 return n;
23432 }
23433 EXPORT_SYMBOL(__copy_to_user_ll);
23434@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23435 unsigned long n)
23436 {
23437 if (movsl_is_ok(to, from, n))
23438- __copy_user(to, from, n);
23439+ __copy_user(to, from, n, __copyuser_seg, "", "");
23440 else
23441- n = __copy_user_intel((void __user *)to,
23442- (const void *)from, n);
23443+ n = __generic_copy_from_user_intel(to, from, n);
23444 return n;
23445 }
23446 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23447@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23448 if (n > 64 && cpu_has_xmm2)
23449 n = __copy_user_intel_nocache(to, from, n);
23450 else
23451- __copy_user(to, from, n);
23452+ __copy_user(to, from, n, __copyuser_seg, "", "");
23453 #else
23454- __copy_user(to, from, n);
23455+ __copy_user(to, from, n, __copyuser_seg, "", "");
23456 #endif
23457 return n;
23458 }
23459 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23460
23461-/**
23462- * copy_to_user: - Copy a block of data into user space.
23463- * @to: Destination address, in user space.
23464- * @from: Source address, in kernel space.
23465- * @n: Number of bytes to copy.
23466- *
23467- * Context: User context only. This function may sleep.
23468- *
23469- * Copy data from kernel space to user space.
23470- *
23471- * Returns number of bytes that could not be copied.
23472- * On success, this will be zero.
23473- */
23474-unsigned long
23475-copy_to_user(void __user *to, const void *from, unsigned long n)
23476+#ifdef CONFIG_PAX_MEMORY_UDEREF
23477+void __set_fs(mm_segment_t x)
23478 {
23479- if (access_ok(VERIFY_WRITE, to, n))
23480- n = __copy_to_user(to, from, n);
23481- return n;
23482+ switch (x.seg) {
23483+ case 0:
23484+ loadsegment(gs, 0);
23485+ break;
23486+ case TASK_SIZE_MAX:
23487+ loadsegment(gs, __USER_DS);
23488+ break;
23489+ case -1UL:
23490+ loadsegment(gs, __KERNEL_DS);
23491+ break;
23492+ default:
23493+ BUG();
23494+ }
23495+ return;
23496 }
23497-EXPORT_SYMBOL(copy_to_user);
23498+EXPORT_SYMBOL(__set_fs);
23499
23500-/**
23501- * copy_from_user: - Copy a block of data from user space.
23502- * @to: Destination address, in kernel space.
23503- * @from: Source address, in user space.
23504- * @n: Number of bytes to copy.
23505- *
23506- * Context: User context only. This function may sleep.
23507- *
23508- * Copy data from user space to kernel space.
23509- *
23510- * Returns number of bytes that could not be copied.
23511- * On success, this will be zero.
23512- *
23513- * If some data could not be copied, this function will pad the copied
23514- * data to the requested size using zero bytes.
23515- */
23516-unsigned long
23517-copy_from_user(void *to, const void __user *from, unsigned long n)
23518+void set_fs(mm_segment_t x)
23519 {
23520- if (access_ok(VERIFY_READ, from, n))
23521- n = __copy_from_user(to, from, n);
23522- else
23523- memset(to, 0, n);
23524- return n;
23525+ current_thread_info()->addr_limit = x;
23526+ __set_fs(x);
23527 }
23528-EXPORT_SYMBOL(copy_from_user);
23529+EXPORT_SYMBOL(set_fs);
23530+#endif
23531diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23532index b7c2849..5ef0f95 100644
23533--- a/arch/x86/lib/usercopy_64.c
23534+++ b/arch/x86/lib/usercopy_64.c
23535@@ -42,6 +42,12 @@ long
23536 __strncpy_from_user(char *dst, const char __user *src, long count)
23537 {
23538 long res;
23539+
23540+#ifdef CONFIG_PAX_MEMORY_UDEREF
23541+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23542+ src += PAX_USER_SHADOW_BASE;
23543+#endif
23544+
23545 __do_strncpy_from_user(dst, src, count, res);
23546 return res;
23547 }
23548@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23549 {
23550 long __d0;
23551 might_fault();
23552+
23553+#ifdef CONFIG_PAX_MEMORY_UDEREF
23554+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23555+ addr += PAX_USER_SHADOW_BASE;
23556+#endif
23557+
23558 /* no memory constraint because it doesn't change any memory gcc knows
23559 about */
23560 asm volatile(
23561@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
23562
23563 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23564 {
23565- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23566- return copy_user_generic((__force void *)to, (__force void *)from, len);
23567- }
23568- return len;
23569+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23570+
23571+#ifdef CONFIG_PAX_MEMORY_UDEREF
23572+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23573+ to += PAX_USER_SHADOW_BASE;
23574+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23575+ from += PAX_USER_SHADOW_BASE;
23576+#endif
23577+
23578+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23579+ }
23580+ return len;
23581 }
23582 EXPORT_SYMBOL(copy_in_user);
23583
23584@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23585 * it is not necessary to optimize tail handling.
23586 */
23587 unsigned long
23588-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23589+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
23590 {
23591 char c;
23592 unsigned zero_len;
23593diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23594index 61b41ca..5fef66a 100644
23595--- a/arch/x86/mm/extable.c
23596+++ b/arch/x86/mm/extable.c
23597@@ -1,14 +1,71 @@
23598 #include <linux/module.h>
23599 #include <linux/spinlock.h>
23600+#include <linux/sort.h>
23601 #include <asm/uaccess.h>
23602+#include <asm/pgtable.h>
23603
23604+/*
23605+ * The exception table needs to be sorted so that the binary
23606+ * search that we use to find entries in it works properly.
23607+ * This is used both for the kernel exception table and for
23608+ * the exception tables of modules that get loaded.
23609+ */
23610+static int cmp_ex(const void *a, const void *b)
23611+{
23612+ const struct exception_table_entry *x = a, *y = b;
23613+
23614+ /* avoid overflow */
23615+ if (x->insn > y->insn)
23616+ return 1;
23617+ if (x->insn < y->insn)
23618+ return -1;
23619+ return 0;
23620+}
23621+
23622+static void swap_ex(void *a, void *b, int size)
23623+{
23624+ struct exception_table_entry t, *x = a, *y = b;
23625+
23626+ t = *x;
23627+
23628+ pax_open_kernel();
23629+ *x = *y;
23630+ *y = t;
23631+ pax_close_kernel();
23632+}
23633+
23634+void sort_extable(struct exception_table_entry *start,
23635+ struct exception_table_entry *finish)
23636+{
23637+ sort(start, finish - start, sizeof(struct exception_table_entry),
23638+ cmp_ex, swap_ex);
23639+}
23640+
23641+#ifdef CONFIG_MODULES
23642+/*
23643+ * If the exception table is sorted, any referring to the module init
23644+ * will be at the beginning or the end.
23645+ */
23646+void trim_init_extable(struct module *m)
23647+{
23648+ /*trim the beginning*/
23649+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23650+ m->extable++;
23651+ m->num_exentries--;
23652+ }
23653+ /*trim the end*/
23654+ while (m->num_exentries &&
23655+ within_module_init(m->extable[m->num_exentries-1].insn, m))
23656+ m->num_exentries--;
23657+}
23658+#endif /* CONFIG_MODULES */
23659
23660 int fixup_exception(struct pt_regs *regs)
23661 {
23662 const struct exception_table_entry *fixup;
23663
23664 #ifdef CONFIG_PNPBIOS
23665- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23666+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23667 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23668 extern u32 pnp_bios_is_utter_crap;
23669 pnp_bios_is_utter_crap = 1;
23670diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23671index 8ac0d76..3f191dc 100644
23672--- a/arch/x86/mm/fault.c
23673+++ b/arch/x86/mm/fault.c
23674@@ -11,10 +11,19 @@
23675 #include <linux/kprobes.h> /* __kprobes, ... */
23676 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23677 #include <linux/perf_event.h> /* perf_sw_event */
23678+#include <linux/unistd.h>
23679+#include <linux/compiler.h>
23680
23681 #include <asm/traps.h> /* dotraplinkage, ... */
23682 #include <asm/pgalloc.h> /* pgd_*(), ... */
23683 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23684+#include <asm/vsyscall.h>
23685+#include <asm/tlbflush.h>
23686+
23687+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23688+#include <asm/stacktrace.h>
23689+#include "../kernel/dumpstack.h"
23690+#endif
23691
23692 /*
23693 * Page fault error code bits:
23694@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23695 int ret = 0;
23696
23697 /* kprobe_running() needs smp_processor_id() */
23698- if (kprobes_built_in() && !user_mode_vm(regs)) {
23699+ if (kprobes_built_in() && !user_mode(regs)) {
23700 preempt_disable();
23701 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23702 ret = 1;
23703@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23704 return !instr_lo || (instr_lo>>1) == 1;
23705 case 0x00:
23706 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23707- if (probe_kernel_address(instr, opcode))
23708+ if (user_mode(regs)) {
23709+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23710+ return 0;
23711+ } else if (probe_kernel_address(instr, opcode))
23712 return 0;
23713
23714 *prefetch = (instr_lo == 0xF) &&
23715@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23716 while (instr < max_instr) {
23717 unsigned char opcode;
23718
23719- if (probe_kernel_address(instr, opcode))
23720+ if (user_mode(regs)) {
23721+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23722+ break;
23723+ } else if (probe_kernel_address(instr, opcode))
23724 break;
23725
23726 instr++;
23727@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23728 force_sig_info(si_signo, &info, tsk);
23729 }
23730
23731+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23732+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23733+#endif
23734+
23735+#ifdef CONFIG_PAX_EMUTRAMP
23736+static int pax_handle_fetch_fault(struct pt_regs *regs);
23737+#endif
23738+
23739+#ifdef CONFIG_PAX_PAGEEXEC
23740+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23741+{
23742+ pgd_t *pgd;
23743+ pud_t *pud;
23744+ pmd_t *pmd;
23745+
23746+ pgd = pgd_offset(mm, address);
23747+ if (!pgd_present(*pgd))
23748+ return NULL;
23749+ pud = pud_offset(pgd, address);
23750+ if (!pud_present(*pud))
23751+ return NULL;
23752+ pmd = pmd_offset(pud, address);
23753+ if (!pmd_present(*pmd))
23754+ return NULL;
23755+ return pmd;
23756+}
23757+#endif
23758+
23759 DEFINE_SPINLOCK(pgd_lock);
23760 LIST_HEAD(pgd_list);
23761
23762@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23763 address += PMD_SIZE) {
23764
23765 unsigned long flags;
23766+
23767+#ifdef CONFIG_PAX_PER_CPU_PGD
23768+ unsigned long cpu;
23769+#else
23770 struct page *page;
23771+#endif
23772
23773 spin_lock_irqsave(&pgd_lock, flags);
23774+
23775+#ifdef CONFIG_PAX_PER_CPU_PGD
23776+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23777+ pgd_t *pgd = get_cpu_pgd(cpu);
23778+#else
23779 list_for_each_entry(page, &pgd_list, lru) {
23780- if (!vmalloc_sync_one(page_address(page), address))
23781+ pgd_t *pgd = page_address(page);
23782+#endif
23783+
23784+ if (!vmalloc_sync_one(pgd, address))
23785 break;
23786 }
23787 spin_unlock_irqrestore(&pgd_lock, flags);
23788@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23789 * an interrupt in the middle of a task switch..
23790 */
23791 pgd_paddr = read_cr3();
23792+
23793+#ifdef CONFIG_PAX_PER_CPU_PGD
23794+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23795+#endif
23796+
23797 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23798 if (!pmd_k)
23799 return -1;
23800@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23801
23802 const pgd_t *pgd_ref = pgd_offset_k(address);
23803 unsigned long flags;
23804+
23805+#ifdef CONFIG_PAX_PER_CPU_PGD
23806+ unsigned long cpu;
23807+#else
23808 struct page *page;
23809+#endif
23810
23811 if (pgd_none(*pgd_ref))
23812 continue;
23813
23814 spin_lock_irqsave(&pgd_lock, flags);
23815+
23816+#ifdef CONFIG_PAX_PER_CPU_PGD
23817+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23818+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
23819+#else
23820 list_for_each_entry(page, &pgd_list, lru) {
23821 pgd_t *pgd;
23822 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23823+#endif
23824+
23825 if (pgd_none(*pgd))
23826 set_pgd(pgd, *pgd_ref);
23827 else
23828@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23829 * happen within a race in page table update. In the later
23830 * case just flush:
23831 */
23832+
23833+#ifdef CONFIG_PAX_PER_CPU_PGD
23834+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23835+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23836+#else
23837 pgd = pgd_offset(current->active_mm, address);
23838+#endif
23839+
23840 pgd_ref = pgd_offset_k(address);
23841 if (pgd_none(*pgd_ref))
23842 return -1;
23843@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23844 static int is_errata100(struct pt_regs *regs, unsigned long address)
23845 {
23846 #ifdef CONFIG_X86_64
23847- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23848+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23849 return 1;
23850 #endif
23851 return 0;
23852@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23853 }
23854
23855 static const char nx_warning[] = KERN_CRIT
23856-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23857+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23858
23859 static void
23860 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23861@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23862 if (!oops_may_print())
23863 return;
23864
23865- if (error_code & PF_INSTR) {
23866+ if (nx_enabled && (error_code & PF_INSTR)) {
23867 unsigned int level;
23868
23869 pte_t *pte = lookup_address(address, &level);
23870
23871 if (pte && pte_present(*pte) && !pte_exec(*pte))
23872- printk(nx_warning, current_uid());
23873+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23874 }
23875
23876+#ifdef CONFIG_PAX_KERNEXEC
23877+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23878+ if (current->signal->curr_ip)
23879+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23880+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23881+ else
23882+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23883+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23884+ }
23885+#endif
23886+
23887 printk(KERN_ALERT "BUG: unable to handle kernel ");
23888 if (address < PAGE_SIZE)
23889 printk(KERN_CONT "NULL pointer dereference");
23890@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23891 {
23892 struct task_struct *tsk = current;
23893
23894+#ifdef CONFIG_X86_64
23895+ struct mm_struct *mm = tsk->mm;
23896+
23897+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
23898+ if (regs->ip == (unsigned long)vgettimeofday) {
23899+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
23900+ return;
23901+ } else if (regs->ip == (unsigned long)vtime) {
23902+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
23903+ return;
23904+ } else if (regs->ip == (unsigned long)vgetcpu) {
23905+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
23906+ return;
23907+ }
23908+ }
23909+#endif
23910+
23911 /* User mode accesses just cause a SIGSEGV */
23912 if (error_code & PF_USER) {
23913 /*
23914@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23915 if (is_errata100(regs, address))
23916 return;
23917
23918+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23919+ if (pax_is_fetch_fault(regs, error_code, address)) {
23920+
23921+#ifdef CONFIG_PAX_EMUTRAMP
23922+ switch (pax_handle_fetch_fault(regs)) {
23923+ case 2:
23924+ return;
23925+ }
23926+#endif
23927+
23928+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23929+ do_group_exit(SIGKILL);
23930+ }
23931+#endif
23932+
23933 if (unlikely(show_unhandled_signals))
23934 show_signal_msg(regs, error_code, address, tsk);
23935
23936@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23937 if (fault & VM_FAULT_HWPOISON) {
23938 printk(KERN_ERR
23939 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23940- tsk->comm, tsk->pid, address);
23941+ tsk->comm, task_pid_nr(tsk), address);
23942 code = BUS_MCEERR_AR;
23943 }
23944 #endif
23945@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23946 return 1;
23947 }
23948
23949+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23950+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23951+{
23952+ pte_t *pte;
23953+ pmd_t *pmd;
23954+ spinlock_t *ptl;
23955+ unsigned char pte_mask;
23956+
23957+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23958+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23959+ return 0;
23960+
23961+ /* PaX: it's our fault, let's handle it if we can */
23962+
23963+ /* PaX: take a look at read faults before acquiring any locks */
23964+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23965+ /* instruction fetch attempt from a protected page in user mode */
23966+ up_read(&mm->mmap_sem);
23967+
23968+#ifdef CONFIG_PAX_EMUTRAMP
23969+ switch (pax_handle_fetch_fault(regs)) {
23970+ case 2:
23971+ return 1;
23972+ }
23973+#endif
23974+
23975+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23976+ do_group_exit(SIGKILL);
23977+ }
23978+
23979+ pmd = pax_get_pmd(mm, address);
23980+ if (unlikely(!pmd))
23981+ return 0;
23982+
23983+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23984+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23985+ pte_unmap_unlock(pte, ptl);
23986+ return 0;
23987+ }
23988+
23989+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23990+ /* write attempt to a protected page in user mode */
23991+ pte_unmap_unlock(pte, ptl);
23992+ return 0;
23993+ }
23994+
23995+#ifdef CONFIG_SMP
23996+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23997+#else
23998+ if (likely(address > get_limit(regs->cs)))
23999+#endif
24000+ {
24001+ set_pte(pte, pte_mkread(*pte));
24002+ __flush_tlb_one(address);
24003+ pte_unmap_unlock(pte, ptl);
24004+ up_read(&mm->mmap_sem);
24005+ return 1;
24006+ }
24007+
24008+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24009+
24010+ /*
24011+ * PaX: fill DTLB with user rights and retry
24012+ */
24013+ __asm__ __volatile__ (
24014+ "orb %2,(%1)\n"
24015+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24016+/*
24017+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24018+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24019+ * page fault when examined during a TLB load attempt. this is true not only
24020+ * for PTEs holding a non-present entry but also present entries that will
24021+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24022+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24023+ * for our target pages since their PTEs are simply not in the TLBs at all.
24024+
24025+ * the best thing in omitting it is that we gain around 15-20% speed in the
24026+ * fast path of the page fault handler and can get rid of tracing since we
24027+ * can no longer flush unintended entries.
24028+ */
24029+ "invlpg (%0)\n"
24030+#endif
24031+ __copyuser_seg"testb $0,(%0)\n"
24032+ "xorb %3,(%1)\n"
24033+ :
24034+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24035+ : "memory", "cc");
24036+ pte_unmap_unlock(pte, ptl);
24037+ up_read(&mm->mmap_sem);
24038+ return 1;
24039+}
24040+#endif
24041+
24042 /*
24043 * Handle a spurious fault caused by a stale TLB entry.
24044 *
24045@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24046 static inline int
24047 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24048 {
24049+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24050+ return 1;
24051+
24052 if (write) {
24053 /* write, present and write, not present: */
24054 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24055@@ -956,17 +1175,31 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24056 {
24057 struct vm_area_struct *vma;
24058 struct task_struct *tsk;
24059- unsigned long address;
24060 struct mm_struct *mm;
24061 int write;
24062 int fault;
24063
24064+ /* Get the faulting address: */
24065+ unsigned long address = read_cr2();
24066+
24067+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24068+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24069+ if (!search_exception_tables(regs->ip)) {
24070+ bad_area_nosemaphore(regs, error_code, address);
24071+ return;
24072+ }
24073+ if (address < PAX_USER_SHADOW_BASE) {
24074+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24075+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24076+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24077+ } else
24078+ address -= PAX_USER_SHADOW_BASE;
24079+ }
24080+#endif
24081+
24082 tsk = current;
24083 mm = tsk->mm;
24084
24085- /* Get the faulting address: */
24086- address = read_cr2();
24087-
24088 /*
24089 * Detect and handle instructions that would cause a page fault for
24090 * both a tracked kernel page and a userspace page.
24091@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24092 * User-mode registers count as a user access even for any
24093 * potential system fault or CPU buglet:
24094 */
24095- if (user_mode_vm(regs)) {
24096+ if (user_mode(regs)) {
24097 local_irq_enable();
24098 error_code |= PF_USER;
24099 } else {
24100@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24101 might_sleep();
24102 }
24103
24104+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24105+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24106+ return;
24107+#endif
24108+
24109 vma = find_vma(mm, address);
24110 if (unlikely(!vma)) {
24111 bad_area(regs, error_code, address);
24112@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24113 bad_area(regs, error_code, address);
24114 return;
24115 }
24116- if (error_code & PF_USER) {
24117- /*
24118- * Accessing the stack below %sp is always a bug.
24119- * The large cushion allows instructions like enter
24120- * and pusha to work. ("enter $65535, $31" pushes
24121- * 32 pointers and then decrements %sp by 65535.)
24122- */
24123- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24124- bad_area(regs, error_code, address);
24125- return;
24126- }
24127+ /*
24128+ * Accessing the stack below %sp is always a bug.
24129+ * The large cushion allows instructions like enter
24130+ * and pusha to work. ("enter $65535, $31" pushes
24131+ * 32 pointers and then decrements %sp by 65535.)
24132+ */
24133+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24134+ bad_area(regs, error_code, address);
24135+ return;
24136 }
24137+
24138+#ifdef CONFIG_PAX_SEGMEXEC
24139+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24140+ bad_area(regs, error_code, address);
24141+ return;
24142+ }
24143+#endif
24144+
24145 if (unlikely(expand_stack(vma, address))) {
24146 bad_area(regs, error_code, address);
24147 return;
24148@@ -1146,3 +1390,240 @@ good_area:
24149
24150 up_read(&mm->mmap_sem);
24151 }
24152+
24153+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24154+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24155+{
24156+ struct mm_struct *mm = current->mm;
24157+ unsigned long ip = regs->ip;
24158+
24159+ if (v8086_mode(regs))
24160+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24161+
24162+#ifdef CONFIG_PAX_PAGEEXEC
24163+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24164+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24165+ return true;
24166+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24167+ return true;
24168+ return false;
24169+ }
24170+#endif
24171+
24172+#ifdef CONFIG_PAX_SEGMEXEC
24173+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24174+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24175+ return true;
24176+ return false;
24177+ }
24178+#endif
24179+
24180+ return false;
24181+}
24182+#endif
24183+
24184+#ifdef CONFIG_PAX_EMUTRAMP
24185+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24186+{
24187+ int err;
24188+
24189+ do { /* PaX: gcc trampoline emulation #1 */
24190+ unsigned char mov1, mov2;
24191+ unsigned short jmp;
24192+ unsigned int addr1, addr2;
24193+
24194+#ifdef CONFIG_X86_64
24195+ if ((regs->ip + 11) >> 32)
24196+ break;
24197+#endif
24198+
24199+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24200+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24201+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24202+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24203+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24204+
24205+ if (err)
24206+ break;
24207+
24208+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24209+ regs->cx = addr1;
24210+ regs->ax = addr2;
24211+ regs->ip = addr2;
24212+ return 2;
24213+ }
24214+ } while (0);
24215+
24216+ do { /* PaX: gcc trampoline emulation #2 */
24217+ unsigned char mov, jmp;
24218+ unsigned int addr1, addr2;
24219+
24220+#ifdef CONFIG_X86_64
24221+ if ((regs->ip + 9) >> 32)
24222+ break;
24223+#endif
24224+
24225+ err = get_user(mov, (unsigned char __user *)regs->ip);
24226+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24227+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24228+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24229+
24230+ if (err)
24231+ break;
24232+
24233+ if (mov == 0xB9 && jmp == 0xE9) {
24234+ regs->cx = addr1;
24235+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24236+ return 2;
24237+ }
24238+ } while (0);
24239+
24240+ return 1; /* PaX in action */
24241+}
24242+
24243+#ifdef CONFIG_X86_64
24244+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24245+{
24246+ int err;
24247+
24248+ do { /* PaX: gcc trampoline emulation #1 */
24249+ unsigned short mov1, mov2, jmp1;
24250+ unsigned char jmp2;
24251+ unsigned int addr1;
24252+ unsigned long addr2;
24253+
24254+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24255+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24256+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24257+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24258+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24259+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24260+
24261+ if (err)
24262+ break;
24263+
24264+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24265+ regs->r11 = addr1;
24266+ regs->r10 = addr2;
24267+ regs->ip = addr1;
24268+ return 2;
24269+ }
24270+ } while (0);
24271+
24272+ do { /* PaX: gcc trampoline emulation #2 */
24273+ unsigned short mov1, mov2, jmp1;
24274+ unsigned char jmp2;
24275+ unsigned long addr1, addr2;
24276+
24277+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24278+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24279+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24280+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24281+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24282+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24283+
24284+ if (err)
24285+ break;
24286+
24287+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24288+ regs->r11 = addr1;
24289+ regs->r10 = addr2;
24290+ regs->ip = addr1;
24291+ return 2;
24292+ }
24293+ } while (0);
24294+
24295+ return 1; /* PaX in action */
24296+}
24297+#endif
24298+
24299+/*
24300+ * PaX: decide what to do with offenders (regs->ip = fault address)
24301+ *
24302+ * returns 1 when task should be killed
24303+ * 2 when gcc trampoline was detected
24304+ */
24305+static int pax_handle_fetch_fault(struct pt_regs *regs)
24306+{
24307+ if (v8086_mode(regs))
24308+ return 1;
24309+
24310+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24311+ return 1;
24312+
24313+#ifdef CONFIG_X86_32
24314+ return pax_handle_fetch_fault_32(regs);
24315+#else
24316+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24317+ return pax_handle_fetch_fault_32(regs);
24318+ else
24319+ return pax_handle_fetch_fault_64(regs);
24320+#endif
24321+}
24322+#endif
24323+
24324+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24325+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24326+{
24327+ long i;
24328+
24329+ printk(KERN_ERR "PAX: bytes at PC: ");
24330+ for (i = 0; i < 20; i++) {
24331+ unsigned char c;
24332+ if (get_user(c, (unsigned char __force_user *)pc+i))
24333+ printk(KERN_CONT "?? ");
24334+ else
24335+ printk(KERN_CONT "%02x ", c);
24336+ }
24337+ printk("\n");
24338+
24339+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24340+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24341+ unsigned long c;
24342+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24343+#ifdef CONFIG_X86_32
24344+ printk(KERN_CONT "???????? ");
24345+#else
24346+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24347+ printk(KERN_CONT "???????? ???????? ");
24348+ else
24349+ printk(KERN_CONT "???????????????? ");
24350+#endif
24351+ } else {
24352+#ifdef CONFIG_X86_64
24353+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24354+ printk(KERN_CONT "%08x ", (unsigned int)c);
24355+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24356+ } else
24357+#endif
24358+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24359+ }
24360+ }
24361+ printk("\n");
24362+}
24363+#endif
24364+
24365+/**
24366+ * probe_kernel_write(): safely attempt to write to a location
24367+ * @dst: address to write to
24368+ * @src: pointer to the data that shall be written
24369+ * @size: size of the data chunk
24370+ *
24371+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24372+ * happens, handle that and return -EFAULT.
24373+ */
24374+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24375+{
24376+ long ret;
24377+ mm_segment_t old_fs = get_fs();
24378+
24379+ set_fs(KERNEL_DS);
24380+ pagefault_disable();
24381+ pax_open_kernel();
24382+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24383+ pax_close_kernel();
24384+ pagefault_enable();
24385+ set_fs(old_fs);
24386+
24387+ return ret ? -EFAULT : 0;
24388+}
24389diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24390index 71da1bc..7a16bf4 100644
24391--- a/arch/x86/mm/gup.c
24392+++ b/arch/x86/mm/gup.c
24393@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24394 addr = start;
24395 len = (unsigned long) nr_pages << PAGE_SHIFT;
24396 end = start + len;
24397- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24398+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24399 (void __user *)start, len)))
24400 return 0;
24401
24402diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24403index 63a6ba6..79abd7a 100644
24404--- a/arch/x86/mm/highmem_32.c
24405+++ b/arch/x86/mm/highmem_32.c
24406@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24407 idx = type + KM_TYPE_NR*smp_processor_id();
24408 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24409 BUG_ON(!pte_none(*(kmap_pte-idx)));
24410+
24411+ pax_open_kernel();
24412 set_pte(kmap_pte-idx, mk_pte(page, prot));
24413+ pax_close_kernel();
24414
24415 return (void *)vaddr;
24416 }
24417diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24418index f46c340..6ff9a26 100644
24419--- a/arch/x86/mm/hugetlbpage.c
24420+++ b/arch/x86/mm/hugetlbpage.c
24421@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24422 struct hstate *h = hstate_file(file);
24423 struct mm_struct *mm = current->mm;
24424 struct vm_area_struct *vma;
24425- unsigned long start_addr;
24426+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24427+
24428+#ifdef CONFIG_PAX_SEGMEXEC
24429+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24430+ pax_task_size = SEGMEXEC_TASK_SIZE;
24431+#endif
24432+
24433+ pax_task_size -= PAGE_SIZE;
24434
24435 if (len > mm->cached_hole_size) {
24436- start_addr = mm->free_area_cache;
24437+ start_addr = mm->free_area_cache;
24438 } else {
24439- start_addr = TASK_UNMAPPED_BASE;
24440- mm->cached_hole_size = 0;
24441+ start_addr = mm->mmap_base;
24442+ mm->cached_hole_size = 0;
24443 }
24444
24445 full_search:
24446@@ -281,26 +288,27 @@ full_search:
24447
24448 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24449 /* At this point: (!vma || addr < vma->vm_end). */
24450- if (TASK_SIZE - len < addr) {
24451+ if (pax_task_size - len < addr) {
24452 /*
24453 * Start a new search - just in case we missed
24454 * some holes.
24455 */
24456- if (start_addr != TASK_UNMAPPED_BASE) {
24457- start_addr = TASK_UNMAPPED_BASE;
24458+ if (start_addr != mm->mmap_base) {
24459+ start_addr = mm->mmap_base;
24460 mm->cached_hole_size = 0;
24461 goto full_search;
24462 }
24463 return -ENOMEM;
24464 }
24465- if (!vma || addr + len <= vma->vm_start) {
24466- mm->free_area_cache = addr + len;
24467- return addr;
24468- }
24469+ if (check_heap_stack_gap(vma, addr, len))
24470+ break;
24471 if (addr + mm->cached_hole_size < vma->vm_start)
24472 mm->cached_hole_size = vma->vm_start - addr;
24473 addr = ALIGN(vma->vm_end, huge_page_size(h));
24474 }
24475+
24476+ mm->free_area_cache = addr + len;
24477+ return addr;
24478 }
24479
24480 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24481@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24482 {
24483 struct hstate *h = hstate_file(file);
24484 struct mm_struct *mm = current->mm;
24485- struct vm_area_struct *vma, *prev_vma;
24486- unsigned long base = mm->mmap_base, addr = addr0;
24487+ struct vm_area_struct *vma;
24488+ unsigned long base = mm->mmap_base, addr;
24489 unsigned long largest_hole = mm->cached_hole_size;
24490- int first_time = 1;
24491
24492 /* don't allow allocations above current base */
24493 if (mm->free_area_cache > base)
24494@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24495 largest_hole = 0;
24496 mm->free_area_cache = base;
24497 }
24498-try_again:
24499+
24500 /* make sure it can fit in the remaining address space */
24501 if (mm->free_area_cache < len)
24502 goto fail;
24503
24504 /* either no address requested or cant fit in requested address hole */
24505- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24506+ addr = (mm->free_area_cache - len);
24507 do {
24508+ addr &= huge_page_mask(h);
24509+ vma = find_vma(mm, addr);
24510 /*
24511 * Lookup failure means no vma is above this address,
24512 * i.e. return with success:
24513- */
24514- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24515- return addr;
24516-
24517- /*
24518 * new region fits between prev_vma->vm_end and
24519 * vma->vm_start, use it:
24520 */
24521- if (addr + len <= vma->vm_start &&
24522- (!prev_vma || (addr >= prev_vma->vm_end))) {
24523+ if (check_heap_stack_gap(vma, addr, len)) {
24524 /* remember the address as a hint for next time */
24525- mm->cached_hole_size = largest_hole;
24526- return (mm->free_area_cache = addr);
24527- } else {
24528- /* pull free_area_cache down to the first hole */
24529- if (mm->free_area_cache == vma->vm_end) {
24530- mm->free_area_cache = vma->vm_start;
24531- mm->cached_hole_size = largest_hole;
24532- }
24533+ mm->cached_hole_size = largest_hole;
24534+ return (mm->free_area_cache = addr);
24535+ }
24536+ /* pull free_area_cache down to the first hole */
24537+ if (mm->free_area_cache == vma->vm_end) {
24538+ mm->free_area_cache = vma->vm_start;
24539+ mm->cached_hole_size = largest_hole;
24540 }
24541
24542 /* remember the largest hole we saw so far */
24543 if (addr + largest_hole < vma->vm_start)
24544- largest_hole = vma->vm_start - addr;
24545+ largest_hole = vma->vm_start - addr;
24546
24547 /* try just below the current vma->vm_start */
24548- addr = (vma->vm_start - len) & huge_page_mask(h);
24549- } while (len <= vma->vm_start);
24550+ addr = skip_heap_stack_gap(vma, len);
24551+ } while (!IS_ERR_VALUE(addr));
24552
24553 fail:
24554 /*
24555- * if hint left us with no space for the requested
24556- * mapping then try again:
24557- */
24558- if (first_time) {
24559- mm->free_area_cache = base;
24560- largest_hole = 0;
24561- first_time = 0;
24562- goto try_again;
24563- }
24564- /*
24565 * A failed mmap() very likely causes application failure,
24566 * so fall back to the bottom-up function here. This scenario
24567 * can happen with large stack limits and large mmap()
24568 * allocations.
24569 */
24570- mm->free_area_cache = TASK_UNMAPPED_BASE;
24571+
24572+#ifdef CONFIG_PAX_SEGMEXEC
24573+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24574+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24575+ else
24576+#endif
24577+
24578+ mm->mmap_base = TASK_UNMAPPED_BASE;
24579+
24580+#ifdef CONFIG_PAX_RANDMMAP
24581+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24582+ mm->mmap_base += mm->delta_mmap;
24583+#endif
24584+
24585+ mm->free_area_cache = mm->mmap_base;
24586 mm->cached_hole_size = ~0UL;
24587 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24588 len, pgoff, flags);
24589@@ -387,6 +393,7 @@ fail:
24590 /*
24591 * Restore the topdown base:
24592 */
24593+ mm->mmap_base = base;
24594 mm->free_area_cache = base;
24595 mm->cached_hole_size = ~0UL;
24596
24597@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24598 struct hstate *h = hstate_file(file);
24599 struct mm_struct *mm = current->mm;
24600 struct vm_area_struct *vma;
24601+ unsigned long pax_task_size = TASK_SIZE;
24602
24603 if (len & ~huge_page_mask(h))
24604 return -EINVAL;
24605- if (len > TASK_SIZE)
24606+
24607+#ifdef CONFIG_PAX_SEGMEXEC
24608+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24609+ pax_task_size = SEGMEXEC_TASK_SIZE;
24610+#endif
24611+
24612+ pax_task_size -= PAGE_SIZE;
24613+
24614+ if (len > pax_task_size)
24615 return -ENOMEM;
24616
24617 if (flags & MAP_FIXED) {
24618@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24619 if (addr) {
24620 addr = ALIGN(addr, huge_page_size(h));
24621 vma = find_vma(mm, addr);
24622- if (TASK_SIZE - len >= addr &&
24623- (!vma || addr + len <= vma->vm_start))
24624+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24625 return addr;
24626 }
24627 if (mm->get_unmapped_area == arch_get_unmapped_area)
24628diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24629index 73ffd55..ad78676 100644
24630--- a/arch/x86/mm/init.c
24631+++ b/arch/x86/mm/init.c
24632@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24633 * cause a hotspot and fill up ZONE_DMA. The page tables
24634 * need roughly 0.5KB per GB.
24635 */
24636-#ifdef CONFIG_X86_32
24637- start = 0x7000;
24638-#else
24639- start = 0x8000;
24640-#endif
24641+ start = 0x100000;
24642 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24643 tables, PAGE_SIZE);
24644 if (e820_table_start == -1UL)
24645@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24646 #endif
24647
24648 set_nx();
24649- if (nx_enabled)
24650+ if (nx_enabled && cpu_has_nx)
24651 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24652
24653 /* Enable PSE if available */
24654@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24655 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24656 * mmio resources as well as potential bios/acpi data regions.
24657 */
24658+
24659 int devmem_is_allowed(unsigned long pagenr)
24660 {
24661+#ifdef CONFIG_GRKERNSEC_KMEM
24662+ /* allow BDA */
24663+ if (!pagenr)
24664+ return 1;
24665+ /* allow EBDA */
24666+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24667+ return 1;
24668+ /* allow ISA/video mem */
24669+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24670+ return 1;
24671+ /* throw out everything else below 1MB */
24672+ if (pagenr <= 256)
24673+ return 0;
24674+#else
24675 if (pagenr <= 256)
24676 return 1;
24677+#endif
24678+
24679 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24680 return 0;
24681 if (!page_is_ram(pagenr))
24682@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24683
24684 void free_initmem(void)
24685 {
24686+
24687+#ifdef CONFIG_PAX_KERNEXEC
24688+#ifdef CONFIG_X86_32
24689+ /* PaX: limit KERNEL_CS to actual size */
24690+ unsigned long addr, limit;
24691+ struct desc_struct d;
24692+ int cpu;
24693+
24694+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24695+ limit = (limit - 1UL) >> PAGE_SHIFT;
24696+
24697+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24698+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
24699+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24700+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24701+ }
24702+
24703+ /* PaX: make KERNEL_CS read-only */
24704+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24705+ if (!paravirt_enabled())
24706+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24707+/*
24708+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24709+ pgd = pgd_offset_k(addr);
24710+ pud = pud_offset(pgd, addr);
24711+ pmd = pmd_offset(pud, addr);
24712+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24713+ }
24714+*/
24715+#ifdef CONFIG_X86_PAE
24716+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24717+/*
24718+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24719+ pgd = pgd_offset_k(addr);
24720+ pud = pud_offset(pgd, addr);
24721+ pmd = pmd_offset(pud, addr);
24722+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24723+ }
24724+*/
24725+#endif
24726+
24727+#ifdef CONFIG_MODULES
24728+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24729+#endif
24730+
24731+#else
24732+ pgd_t *pgd;
24733+ pud_t *pud;
24734+ pmd_t *pmd;
24735+ unsigned long addr, end;
24736+
24737+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24738+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24739+ pgd = pgd_offset_k(addr);
24740+ pud = pud_offset(pgd, addr);
24741+ pmd = pmd_offset(pud, addr);
24742+ if (!pmd_present(*pmd))
24743+ continue;
24744+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24745+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24746+ else
24747+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24748+ }
24749+
24750+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24751+ end = addr + KERNEL_IMAGE_SIZE;
24752+ for (; addr < end; addr += PMD_SIZE) {
24753+ pgd = pgd_offset_k(addr);
24754+ pud = pud_offset(pgd, addr);
24755+ pmd = pmd_offset(pud, addr);
24756+ if (!pmd_present(*pmd))
24757+ continue;
24758+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24759+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24760+ }
24761+#endif
24762+
24763+ flush_tlb_all();
24764+#endif
24765+
24766 free_init_pages("unused kernel memory",
24767 (unsigned long)(&__init_begin),
24768 (unsigned long)(&__init_end));
24769diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24770index 30938c1..bda3d5d 100644
24771--- a/arch/x86/mm/init_32.c
24772+++ b/arch/x86/mm/init_32.c
24773@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24774 }
24775
24776 /*
24777- * Creates a middle page table and puts a pointer to it in the
24778- * given global directory entry. This only returns the gd entry
24779- * in non-PAE compilation mode, since the middle layer is folded.
24780- */
24781-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24782-{
24783- pud_t *pud;
24784- pmd_t *pmd_table;
24785-
24786-#ifdef CONFIG_X86_PAE
24787- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24788- if (after_bootmem)
24789- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24790- else
24791- pmd_table = (pmd_t *)alloc_low_page();
24792- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24793- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24794- pud = pud_offset(pgd, 0);
24795- BUG_ON(pmd_table != pmd_offset(pud, 0));
24796-
24797- return pmd_table;
24798- }
24799-#endif
24800- pud = pud_offset(pgd, 0);
24801- pmd_table = pmd_offset(pud, 0);
24802-
24803- return pmd_table;
24804-}
24805-
24806-/*
24807 * Create a page table and place a pointer to it in a middle page
24808 * directory entry:
24809 */
24810@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24811 page_table = (pte_t *)alloc_low_page();
24812
24813 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24814+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24815+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24816+#else
24817 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24818+#endif
24819 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24820 }
24821
24822 return pte_offset_kernel(pmd, 0);
24823 }
24824
24825+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24826+{
24827+ pud_t *pud;
24828+ pmd_t *pmd_table;
24829+
24830+ pud = pud_offset(pgd, 0);
24831+ pmd_table = pmd_offset(pud, 0);
24832+
24833+ return pmd_table;
24834+}
24835+
24836 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24837 {
24838 int pgd_idx = pgd_index(vaddr);
24839@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24840 int pgd_idx, pmd_idx;
24841 unsigned long vaddr;
24842 pgd_t *pgd;
24843+ pud_t *pud;
24844 pmd_t *pmd;
24845 pte_t *pte = NULL;
24846
24847@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24848 pgd = pgd_base + pgd_idx;
24849
24850 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24851- pmd = one_md_table_init(pgd);
24852- pmd = pmd + pmd_index(vaddr);
24853+ pud = pud_offset(pgd, vaddr);
24854+ pmd = pmd_offset(pud, vaddr);
24855+
24856+#ifdef CONFIG_X86_PAE
24857+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24858+#endif
24859+
24860 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24861 pmd++, pmd_idx++) {
24862 pte = page_table_kmap_check(one_page_table_init(pmd),
24863@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24864 }
24865 }
24866
24867-static inline int is_kernel_text(unsigned long addr)
24868+static inline int is_kernel_text(unsigned long start, unsigned long end)
24869 {
24870- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
24871- return 1;
24872- return 0;
24873+ if ((start > ktla_ktva((unsigned long)_etext) ||
24874+ end <= ktla_ktva((unsigned long)_stext)) &&
24875+ (start > ktla_ktva((unsigned long)_einittext) ||
24876+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24877+
24878+#ifdef CONFIG_ACPI_SLEEP
24879+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24880+#endif
24881+
24882+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24883+ return 0;
24884+ return 1;
24885 }
24886
24887 /*
24888@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
24889 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
24890 unsigned long start_pfn, end_pfn;
24891 pgd_t *pgd_base = swapper_pg_dir;
24892- int pgd_idx, pmd_idx, pte_ofs;
24893+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24894 unsigned long pfn;
24895 pgd_t *pgd;
24896+ pud_t *pud;
24897 pmd_t *pmd;
24898 pte_t *pte;
24899 unsigned pages_2m, pages_4k;
24900@@ -278,8 +279,13 @@ repeat:
24901 pfn = start_pfn;
24902 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24903 pgd = pgd_base + pgd_idx;
24904- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24905- pmd = one_md_table_init(pgd);
24906+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24907+ pud = pud_offset(pgd, 0);
24908+ pmd = pmd_offset(pud, 0);
24909+
24910+#ifdef CONFIG_X86_PAE
24911+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24912+#endif
24913
24914 if (pfn >= end_pfn)
24915 continue;
24916@@ -291,14 +297,13 @@ repeat:
24917 #endif
24918 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24919 pmd++, pmd_idx++) {
24920- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24921+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24922
24923 /*
24924 * Map with big pages if possible, otherwise
24925 * create normal page tables:
24926 */
24927 if (use_pse) {
24928- unsigned int addr2;
24929 pgprot_t prot = PAGE_KERNEL_LARGE;
24930 /*
24931 * first pass will use the same initial
24932@@ -308,11 +313,7 @@ repeat:
24933 __pgprot(PTE_IDENT_ATTR |
24934 _PAGE_PSE);
24935
24936- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24937- PAGE_OFFSET + PAGE_SIZE-1;
24938-
24939- if (is_kernel_text(addr) ||
24940- is_kernel_text(addr2))
24941+ if (is_kernel_text(address, address + PMD_SIZE))
24942 prot = PAGE_KERNEL_LARGE_EXEC;
24943
24944 pages_2m++;
24945@@ -329,7 +330,7 @@ repeat:
24946 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24947 pte += pte_ofs;
24948 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24949- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24950+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24951 pgprot_t prot = PAGE_KERNEL;
24952 /*
24953 * first pass will use the same initial
24954@@ -337,7 +338,7 @@ repeat:
24955 */
24956 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24957
24958- if (is_kernel_text(addr))
24959+ if (is_kernel_text(address, address + PAGE_SIZE))
24960 prot = PAGE_KERNEL_EXEC;
24961
24962 pages_4k++;
24963@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24964
24965 pud = pud_offset(pgd, va);
24966 pmd = pmd_offset(pud, va);
24967- if (!pmd_present(*pmd))
24968+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24969 break;
24970
24971 pte = pte_offset_kernel(pmd, va);
24972@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
24973
24974 static void __init pagetable_init(void)
24975 {
24976- pgd_t *pgd_base = swapper_pg_dir;
24977-
24978- permanent_kmaps_init(pgd_base);
24979+ permanent_kmaps_init(swapper_pg_dir);
24980 }
24981
24982 #ifdef CONFIG_ACPI_SLEEP
24983@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
24984 * ACPI suspend needs this for resume, because things like the intel-agp
24985 * driver might have split up a kernel 4MB mapping.
24986 */
24987-char swsusp_pg_dir[PAGE_SIZE]
24988+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
24989 __attribute__ ((aligned(PAGE_SIZE)));
24990
24991 static inline void save_pg_dir(void)
24992 {
24993- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
24994+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
24995 }
24996 #else /* !CONFIG_ACPI_SLEEP */
24997 static inline void save_pg_dir(void)
24998@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
24999 flush_tlb_all();
25000 }
25001
25002-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25003+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25004 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25005
25006 /* user-defined highmem size */
25007@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25008 * Initialize the boot-time allocator (with low memory only):
25009 */
25010 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25011- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25012+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25013 PAGE_SIZE);
25014 if (bootmap == -1L)
25015 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25016@@ -864,6 +863,12 @@ void __init mem_init(void)
25017
25018 pci_iommu_alloc();
25019
25020+#ifdef CONFIG_PAX_PER_CPU_PGD
25021+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25022+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25023+ KERNEL_PGD_PTRS);
25024+#endif
25025+
25026 #ifdef CONFIG_FLATMEM
25027 BUG_ON(!mem_map);
25028 #endif
25029@@ -881,7 +886,7 @@ void __init mem_init(void)
25030 set_highmem_pages_init();
25031
25032 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25033- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25034+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25035 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25036
25037 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25038@@ -923,10 +928,10 @@ void __init mem_init(void)
25039 ((unsigned long)&__init_end -
25040 (unsigned long)&__init_begin) >> 10,
25041
25042- (unsigned long)&_etext, (unsigned long)&_edata,
25043- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25044+ (unsigned long)&_sdata, (unsigned long)&_edata,
25045+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25046
25047- (unsigned long)&_text, (unsigned long)&_etext,
25048+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25049 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25050
25051 /*
25052@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25053 if (!kernel_set_to_readonly)
25054 return;
25055
25056+ start = ktla_ktva(start);
25057 pr_debug("Set kernel text: %lx - %lx for read write\n",
25058 start, start+size);
25059
25060@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25061 if (!kernel_set_to_readonly)
25062 return;
25063
25064+ start = ktla_ktva(start);
25065 pr_debug("Set kernel text: %lx - %lx for read only\n",
25066 start, start+size);
25067
25068@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25069 unsigned long start = PFN_ALIGN(_text);
25070 unsigned long size = PFN_ALIGN(_etext) - start;
25071
25072+ start = ktla_ktva(start);
25073 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25074 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25075 size >> 10);
25076diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25077index 7d095ad..25d2549 100644
25078--- a/arch/x86/mm/init_64.c
25079+++ b/arch/x86/mm/init_64.c
25080@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25081 pmd = fill_pmd(pud, vaddr);
25082 pte = fill_pte(pmd, vaddr);
25083
25084+ pax_open_kernel();
25085 set_pte(pte, new_pte);
25086+ pax_close_kernel();
25087
25088 /*
25089 * It's enough to flush this one mapping.
25090@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25091 pgd = pgd_offset_k((unsigned long)__va(phys));
25092 if (pgd_none(*pgd)) {
25093 pud = (pud_t *) spp_getpage();
25094- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25095- _PAGE_USER));
25096+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25097 }
25098 pud = pud_offset(pgd, (unsigned long)__va(phys));
25099 if (pud_none(*pud)) {
25100 pmd = (pmd_t *) spp_getpage();
25101- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25102- _PAGE_USER));
25103+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25104 }
25105 pmd = pmd_offset(pud, phys);
25106 BUG_ON(!pmd_none(*pmd));
25107@@ -675,6 +675,12 @@ void __init mem_init(void)
25108
25109 pci_iommu_alloc();
25110
25111+#ifdef CONFIG_PAX_PER_CPU_PGD
25112+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25113+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25114+ KERNEL_PGD_PTRS);
25115+#endif
25116+
25117 /* clear_bss() already clear the empty_zero_page */
25118
25119 reservedpages = 0;
25120@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25121 static struct vm_area_struct gate_vma = {
25122 .vm_start = VSYSCALL_START,
25123 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25124- .vm_page_prot = PAGE_READONLY_EXEC,
25125- .vm_flags = VM_READ | VM_EXEC
25126+ .vm_page_prot = PAGE_READONLY,
25127+ .vm_flags = VM_READ
25128 };
25129
25130 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25131@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25132
25133 const char *arch_vma_name(struct vm_area_struct *vma)
25134 {
25135- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25136+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25137 return "[vdso]";
25138 if (vma == &gate_vma)
25139 return "[vsyscall]";
25140diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25141index 84e236c..69bd3f6 100644
25142--- a/arch/x86/mm/iomap_32.c
25143+++ b/arch/x86/mm/iomap_32.c
25144@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25145 debug_kmap_atomic(type);
25146 idx = type + KM_TYPE_NR * smp_processor_id();
25147 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25148+
25149+ pax_open_kernel();
25150 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25151+ pax_close_kernel();
25152+
25153 arch_flush_lazy_mmu_mode();
25154
25155 return (void *)vaddr;
25156diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25157index 2feb9bd..3646202 100644
25158--- a/arch/x86/mm/ioremap.c
25159+++ b/arch/x86/mm/ioremap.c
25160@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25161 * Second special case: Some BIOSen report the PC BIOS
25162 * area (640->1Mb) as ram even though it is not.
25163 */
25164- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25165- pagenr < (BIOS_END >> PAGE_SHIFT))
25166+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25167+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25168 return 0;
25169
25170 for (i = 0; i < e820.nr_map; i++) {
25171@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25172 /*
25173 * Don't allow anybody to remap normal RAM that we're using..
25174 */
25175- for (pfn = phys_addr >> PAGE_SHIFT;
25176- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25177- pfn++) {
25178-
25179+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25180 int is_ram = page_is_ram(pfn);
25181
25182- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25183+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25184 return NULL;
25185 WARN_ON_ONCE(is_ram);
25186 }
25187@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_setup(char *str)
25188 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25189
25190 static __initdata int after_paging_init;
25191-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25192+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25193
25194 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25195 {
25196@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
25197 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25198
25199 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25200- memset(bm_pte, 0, sizeof(bm_pte));
25201- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25202+ pmd_populate_user(&init_mm, pmd, bm_pte);
25203
25204 /*
25205 * The boot-ioremap range spans multiple pmds, for which
25206diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25207index 8cc1833..1abbc5b 100644
25208--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25209+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25210@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25211 * memory (e.g. tracked pages)? For now, we need this to avoid
25212 * invoking kmemcheck for PnP BIOS calls.
25213 */
25214- if (regs->flags & X86_VM_MASK)
25215+ if (v8086_mode(regs))
25216 return false;
25217- if (regs->cs != __KERNEL_CS)
25218+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25219 return false;
25220
25221 pte = kmemcheck_pte_lookup(address);
25222diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25223index c8191de..2975082 100644
25224--- a/arch/x86/mm/mmap.c
25225+++ b/arch/x86/mm/mmap.c
25226@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25227 * Leave an at least ~128 MB hole with possible stack randomization.
25228 */
25229 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25230-#define MAX_GAP (TASK_SIZE/6*5)
25231+#define MAX_GAP (pax_task_size/6*5)
25232
25233 /*
25234 * True on X86_32 or when emulating IA32 on X86_64
25235@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25236 return rnd << PAGE_SHIFT;
25237 }
25238
25239-static unsigned long mmap_base(void)
25240+static unsigned long mmap_base(struct mm_struct *mm)
25241 {
25242 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25243+ unsigned long pax_task_size = TASK_SIZE;
25244+
25245+#ifdef CONFIG_PAX_SEGMEXEC
25246+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25247+ pax_task_size = SEGMEXEC_TASK_SIZE;
25248+#endif
25249
25250 if (gap < MIN_GAP)
25251 gap = MIN_GAP;
25252 else if (gap > MAX_GAP)
25253 gap = MAX_GAP;
25254
25255- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25256+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25257 }
25258
25259 /*
25260 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25261 * does, but not when emulating X86_32
25262 */
25263-static unsigned long mmap_legacy_base(void)
25264+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25265 {
25266- if (mmap_is_ia32())
25267+ if (mmap_is_ia32()) {
25268+
25269+#ifdef CONFIG_PAX_SEGMEXEC
25270+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25271+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25272+ else
25273+#endif
25274+
25275 return TASK_UNMAPPED_BASE;
25276- else
25277+ } else
25278 return TASK_UNMAPPED_BASE + mmap_rnd();
25279 }
25280
25281@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25282 void arch_pick_mmap_layout(struct mm_struct *mm)
25283 {
25284 if (mmap_is_legacy()) {
25285- mm->mmap_base = mmap_legacy_base();
25286+ mm->mmap_base = mmap_legacy_base(mm);
25287+
25288+#ifdef CONFIG_PAX_RANDMMAP
25289+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25290+ mm->mmap_base += mm->delta_mmap;
25291+#endif
25292+
25293 mm->get_unmapped_area = arch_get_unmapped_area;
25294 mm->unmap_area = arch_unmap_area;
25295 } else {
25296- mm->mmap_base = mmap_base();
25297+ mm->mmap_base = mmap_base(mm);
25298+
25299+#ifdef CONFIG_PAX_RANDMMAP
25300+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25301+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25302+#endif
25303+
25304 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25305 mm->unmap_area = arch_unmap_area_topdown;
25306 }
25307diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25308index 132772a..b961f11 100644
25309--- a/arch/x86/mm/mmio-mod.c
25310+++ b/arch/x86/mm/mmio-mod.c
25311@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25312 break;
25313 default:
25314 {
25315- unsigned char *ip = (unsigned char *)instptr;
25316+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25317 my_trace->opcode = MMIO_UNKNOWN_OP;
25318 my_trace->width = 0;
25319 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25320@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25321 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25322 void __iomem *addr)
25323 {
25324- static atomic_t next_id;
25325+ static atomic_unchecked_t next_id;
25326 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25327 /* These are page-unaligned. */
25328 struct mmiotrace_map map = {
25329@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25330 .private = trace
25331 },
25332 .phys = offset,
25333- .id = atomic_inc_return(&next_id)
25334+ .id = atomic_inc_return_unchecked(&next_id)
25335 };
25336 map.map_id = trace->id;
25337
25338diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25339index d253006..e56dd6a 100644
25340--- a/arch/x86/mm/numa_32.c
25341+++ b/arch/x86/mm/numa_32.c
25342@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25343 }
25344 #endif
25345
25346-extern unsigned long find_max_low_pfn(void);
25347 extern unsigned long highend_pfn, highstart_pfn;
25348
25349 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25350diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25351index e1d1069..2251ff3 100644
25352--- a/arch/x86/mm/pageattr-test.c
25353+++ b/arch/x86/mm/pageattr-test.c
25354@@ -36,7 +36,7 @@ enum {
25355
25356 static int pte_testbit(pte_t pte)
25357 {
25358- return pte_flags(pte) & _PAGE_UNUSED1;
25359+ return pte_flags(pte) & _PAGE_CPA_TEST;
25360 }
25361
25362 struct split_state {
25363diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25364index dd38bfb..8c12306 100644
25365--- a/arch/x86/mm/pageattr.c
25366+++ b/arch/x86/mm/pageattr.c
25367@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25368 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25369 */
25370 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25371- pgprot_val(forbidden) |= _PAGE_NX;
25372+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25373
25374 /*
25375 * The kernel text needs to be executable for obvious reasons
25376 * Does not cover __inittext since that is gone later on. On
25377 * 64bit we do not enforce !NX on the low mapping
25378 */
25379- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25380- pgprot_val(forbidden) |= _PAGE_NX;
25381+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25382+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25383
25384+#ifdef CONFIG_DEBUG_RODATA
25385 /*
25386 * The .rodata section needs to be read-only. Using the pfn
25387 * catches all aliases.
25388@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25389 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25390 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25391 pgprot_val(forbidden) |= _PAGE_RW;
25392+#endif
25393+
25394+#ifdef CONFIG_PAX_KERNEXEC
25395+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25396+ pgprot_val(forbidden) |= _PAGE_RW;
25397+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25398+ }
25399+#endif
25400
25401 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25402
25403@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25404 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25405 {
25406 /* change init_mm */
25407+ pax_open_kernel();
25408 set_pte_atomic(kpte, pte);
25409+
25410 #ifdef CONFIG_X86_32
25411 if (!SHARED_KERNEL_PMD) {
25412+
25413+#ifdef CONFIG_PAX_PER_CPU_PGD
25414+ unsigned long cpu;
25415+#else
25416 struct page *page;
25417+#endif
25418
25419+#ifdef CONFIG_PAX_PER_CPU_PGD
25420+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
25421+ pgd_t *pgd = get_cpu_pgd(cpu);
25422+#else
25423 list_for_each_entry(page, &pgd_list, lru) {
25424- pgd_t *pgd;
25425+ pgd_t *pgd = (pgd_t *)page_address(page);
25426+#endif
25427+
25428 pud_t *pud;
25429 pmd_t *pmd;
25430
25431- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25432+ pgd += pgd_index(address);
25433 pud = pud_offset(pgd, address);
25434 pmd = pmd_offset(pud, address);
25435 set_pte_atomic((pte_t *)pmd, pte);
25436 }
25437 }
25438 #endif
25439+ pax_close_kernel();
25440 }
25441
25442 static int
25443diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25444index e78cd0e..de0a817 100644
25445--- a/arch/x86/mm/pat.c
25446+++ b/arch/x86/mm/pat.c
25447@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25448
25449 conflict:
25450 printk(KERN_INFO "%s:%d conflicting memory types "
25451- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25452+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25453 new->end, cattr_name(new->type), cattr_name(entry->type));
25454 return -EBUSY;
25455 }
25456@@ -559,7 +559,7 @@ unlock_ret:
25457
25458 if (err) {
25459 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25460- current->comm, current->pid, start, end);
25461+ current->comm, task_pid_nr(current), start, end);
25462 }
25463
25464 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25465@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25466 while (cursor < to) {
25467 if (!devmem_is_allowed(pfn)) {
25468 printk(KERN_INFO
25469- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25470- current->comm, from, to);
25471+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25472+ current->comm, from, to, cursor);
25473 return 0;
25474 }
25475 cursor += PAGE_SIZE;
25476@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25477 printk(KERN_INFO
25478 "%s:%d ioremap_change_attr failed %s "
25479 "for %Lx-%Lx\n",
25480- current->comm, current->pid,
25481+ current->comm, task_pid_nr(current),
25482 cattr_name(flags),
25483 base, (unsigned long long)(base + size));
25484 return -EINVAL;
25485@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25486 free_memtype(paddr, paddr + size);
25487 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25488 " for %Lx-%Lx, got %s\n",
25489- current->comm, current->pid,
25490+ current->comm, task_pid_nr(current),
25491 cattr_name(want_flags),
25492 (unsigned long long)paddr,
25493 (unsigned long long)(paddr + size),
25494diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25495index df3d5c8..c2223e1 100644
25496--- a/arch/x86/mm/pf_in.c
25497+++ b/arch/x86/mm/pf_in.c
25498@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25499 int i;
25500 enum reason_type rv = OTHERS;
25501
25502- p = (unsigned char *)ins_addr;
25503+ p = (unsigned char *)ktla_ktva(ins_addr);
25504 p += skip_prefix(p, &prf);
25505 p += get_opcode(p, &opcode);
25506
25507@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25508 struct prefix_bits prf;
25509 int i;
25510
25511- p = (unsigned char *)ins_addr;
25512+ p = (unsigned char *)ktla_ktva(ins_addr);
25513 p += skip_prefix(p, &prf);
25514 p += get_opcode(p, &opcode);
25515
25516@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25517 struct prefix_bits prf;
25518 int i;
25519
25520- p = (unsigned char *)ins_addr;
25521+ p = (unsigned char *)ktla_ktva(ins_addr);
25522 p += skip_prefix(p, &prf);
25523 p += get_opcode(p, &opcode);
25524
25525@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25526 int i;
25527 unsigned long rv;
25528
25529- p = (unsigned char *)ins_addr;
25530+ p = (unsigned char *)ktla_ktva(ins_addr);
25531 p += skip_prefix(p, &prf);
25532 p += get_opcode(p, &opcode);
25533 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25534@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25535 int i;
25536 unsigned long rv;
25537
25538- p = (unsigned char *)ins_addr;
25539+ p = (unsigned char *)ktla_ktva(ins_addr);
25540 p += skip_prefix(p, &prf);
25541 p += get_opcode(p, &opcode);
25542 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25543diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25544index e0e6fad..6b90017 100644
25545--- a/arch/x86/mm/pgtable.c
25546+++ b/arch/x86/mm/pgtable.c
25547@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25548 list_del(&page->lru);
25549 }
25550
25551-#define UNSHARED_PTRS_PER_PGD \
25552- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25553+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25554+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25555
25556+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25557+{
25558+ while (count--)
25559+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25560+}
25561+#endif
25562+
25563+#ifdef CONFIG_PAX_PER_CPU_PGD
25564+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25565+{
25566+ while (count--)
25567+
25568+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25569+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25570+#else
25571+ *dst++ = *src++;
25572+#endif
25573+
25574+}
25575+#endif
25576+
25577+#ifdef CONFIG_X86_64
25578+#define pxd_t pud_t
25579+#define pyd_t pgd_t
25580+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25581+#define pxd_free(mm, pud) pud_free((mm), (pud))
25582+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25583+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
25584+#define PYD_SIZE PGDIR_SIZE
25585+#else
25586+#define pxd_t pmd_t
25587+#define pyd_t pud_t
25588+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25589+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25590+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25591+#define pyd_offset(mm ,address) pud_offset((mm), (address))
25592+#define PYD_SIZE PUD_SIZE
25593+#endif
25594+
25595+#ifdef CONFIG_PAX_PER_CPU_PGD
25596+static inline void pgd_ctor(pgd_t *pgd) {}
25597+static inline void pgd_dtor(pgd_t *pgd) {}
25598+#else
25599 static void pgd_ctor(pgd_t *pgd)
25600 {
25601 /* If the pgd points to a shared pagetable level (either the
25602@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25603 pgd_list_del(pgd);
25604 spin_unlock_irqrestore(&pgd_lock, flags);
25605 }
25606+#endif
25607
25608 /*
25609 * List of all pgd's needed for non-PAE so it can invalidate entries
25610@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25611 * -- wli
25612 */
25613
25614-#ifdef CONFIG_X86_PAE
25615+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25616 /*
25617 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25618 * updating the top-level pagetable entries to guarantee the
25619@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25620 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25621 * and initialize the kernel pmds here.
25622 */
25623-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25624+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25625
25626 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25627 {
25628@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25629 */
25630 flush_tlb_mm(mm);
25631 }
25632+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25633+#define PREALLOCATED_PXDS USER_PGD_PTRS
25634 #else /* !CONFIG_X86_PAE */
25635
25636 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25637-#define PREALLOCATED_PMDS 0
25638+#define PREALLOCATED_PXDS 0
25639
25640 #endif /* CONFIG_X86_PAE */
25641
25642-static void free_pmds(pmd_t *pmds[])
25643+static void free_pxds(pxd_t *pxds[])
25644 {
25645 int i;
25646
25647- for(i = 0; i < PREALLOCATED_PMDS; i++)
25648- if (pmds[i])
25649- free_page((unsigned long)pmds[i]);
25650+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25651+ if (pxds[i])
25652+ free_page((unsigned long)pxds[i]);
25653 }
25654
25655-static int preallocate_pmds(pmd_t *pmds[])
25656+static int preallocate_pxds(pxd_t *pxds[])
25657 {
25658 int i;
25659 bool failed = false;
25660
25661- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25662- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25663- if (pmd == NULL)
25664+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25665+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25666+ if (pxd == NULL)
25667 failed = true;
25668- pmds[i] = pmd;
25669+ pxds[i] = pxd;
25670 }
25671
25672 if (failed) {
25673- free_pmds(pmds);
25674+ free_pxds(pxds);
25675 return -ENOMEM;
25676 }
25677
25678@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25679 * preallocate which never got a corresponding vma will need to be
25680 * freed manually.
25681 */
25682-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25683+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25684 {
25685 int i;
25686
25687- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25688+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25689 pgd_t pgd = pgdp[i];
25690
25691 if (pgd_val(pgd) != 0) {
25692- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25693+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25694
25695- pgdp[i] = native_make_pgd(0);
25696+ set_pgd(pgdp + i, native_make_pgd(0));
25697
25698- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25699- pmd_free(mm, pmd);
25700+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25701+ pxd_free(mm, pxd);
25702 }
25703 }
25704 }
25705
25706-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25707+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25708 {
25709- pud_t *pud;
25710+ pyd_t *pyd;
25711 unsigned long addr;
25712 int i;
25713
25714- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25715+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25716 return;
25717
25718- pud = pud_offset(pgd, 0);
25719+#ifdef CONFIG_X86_64
25720+ pyd = pyd_offset(mm, 0L);
25721+#else
25722+ pyd = pyd_offset(pgd, 0L);
25723+#endif
25724
25725- for (addr = i = 0; i < PREALLOCATED_PMDS;
25726- i++, pud++, addr += PUD_SIZE) {
25727- pmd_t *pmd = pmds[i];
25728+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25729+ i++, pyd++, addr += PYD_SIZE) {
25730+ pxd_t *pxd = pxds[i];
25731
25732 if (i >= KERNEL_PGD_BOUNDARY)
25733- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25734- sizeof(pmd_t) * PTRS_PER_PMD);
25735+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25736+ sizeof(pxd_t) * PTRS_PER_PMD);
25737
25738- pud_populate(mm, pud, pmd);
25739+ pyd_populate(mm, pyd, pxd);
25740 }
25741 }
25742
25743 pgd_t *pgd_alloc(struct mm_struct *mm)
25744 {
25745 pgd_t *pgd;
25746- pmd_t *pmds[PREALLOCATED_PMDS];
25747+ pxd_t *pxds[PREALLOCATED_PXDS];
25748+
25749 unsigned long flags;
25750
25751 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25752@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25753
25754 mm->pgd = pgd;
25755
25756- if (preallocate_pmds(pmds) != 0)
25757+ if (preallocate_pxds(pxds) != 0)
25758 goto out_free_pgd;
25759
25760 if (paravirt_pgd_alloc(mm) != 0)
25761- goto out_free_pmds;
25762+ goto out_free_pxds;
25763
25764 /*
25765 * Make sure that pre-populating the pmds is atomic with
25766@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25767 spin_lock_irqsave(&pgd_lock, flags);
25768
25769 pgd_ctor(pgd);
25770- pgd_prepopulate_pmd(mm, pgd, pmds);
25771+ pgd_prepopulate_pxd(mm, pgd, pxds);
25772
25773 spin_unlock_irqrestore(&pgd_lock, flags);
25774
25775 return pgd;
25776
25777-out_free_pmds:
25778- free_pmds(pmds);
25779+out_free_pxds:
25780+ free_pxds(pxds);
25781 out_free_pgd:
25782 free_page((unsigned long)pgd);
25783 out:
25784@@ -287,7 +338,7 @@ out:
25785
25786 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25787 {
25788- pgd_mop_up_pmds(mm, pgd);
25789+ pgd_mop_up_pxds(mm, pgd);
25790 pgd_dtor(pgd);
25791 paravirt_pgd_free(mm, pgd);
25792 free_page((unsigned long)pgd);
25793diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25794index 46c8834..fcab43d 100644
25795--- a/arch/x86/mm/pgtable_32.c
25796+++ b/arch/x86/mm/pgtable_32.c
25797@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25798 return;
25799 }
25800 pte = pte_offset_kernel(pmd, vaddr);
25801+
25802+ pax_open_kernel();
25803 if (pte_val(pteval))
25804 set_pte_at(&init_mm, vaddr, pte, pteval);
25805 else
25806 pte_clear(&init_mm, vaddr, pte);
25807+ pax_close_kernel();
25808
25809 /*
25810 * It's enough to flush this one mapping.
25811diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25812index 513d8ed..978c161 100644
25813--- a/arch/x86/mm/setup_nx.c
25814+++ b/arch/x86/mm/setup_nx.c
25815@@ -4,11 +4,10 @@
25816
25817 #include <asm/pgtable.h>
25818
25819+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25820 int nx_enabled;
25821
25822-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25823-static int disable_nx __cpuinitdata;
25824-
25825+#ifndef CONFIG_PAX_PAGEEXEC
25826 /*
25827 * noexec = on|off
25828 *
25829@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
25830 if (!str)
25831 return -EINVAL;
25832 if (!strncmp(str, "on", 2)) {
25833- __supported_pte_mask |= _PAGE_NX;
25834- disable_nx = 0;
25835+ nx_enabled = 1;
25836 } else if (!strncmp(str, "off", 3)) {
25837- disable_nx = 1;
25838- __supported_pte_mask &= ~_PAGE_NX;
25839+ nx_enabled = 0;
25840 }
25841 return 0;
25842 }
25843 early_param("noexec", noexec_setup);
25844 #endif
25845+#endif
25846
25847 #ifdef CONFIG_X86_PAE
25848 void __init set_nx(void)
25849 {
25850- unsigned int v[4], l, h;
25851+ if (!nx_enabled && cpu_has_nx) {
25852+ unsigned l, h;
25853
25854- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
25855- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
25856-
25857- if ((v[3] & (1 << 20)) && !disable_nx) {
25858- rdmsr(MSR_EFER, l, h);
25859- l |= EFER_NX;
25860- wrmsr(MSR_EFER, l, h);
25861- nx_enabled = 1;
25862- __supported_pte_mask |= _PAGE_NX;
25863- }
25864+ __supported_pte_mask &= ~_PAGE_NX;
25865+ rdmsr(MSR_EFER, l, h);
25866+ l &= ~EFER_NX;
25867+ wrmsr(MSR_EFER, l, h);
25868 }
25869 }
25870 #else
25871@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
25872 unsigned long efer;
25873
25874 rdmsrl(MSR_EFER, efer);
25875- if (!(efer & EFER_NX) || disable_nx)
25876+ if (!(efer & EFER_NX) || !nx_enabled)
25877 __supported_pte_mask &= ~_PAGE_NX;
25878 }
25879 #endif
25880diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25881index 36fe08e..b123d3a 100644
25882--- a/arch/x86/mm/tlb.c
25883+++ b/arch/x86/mm/tlb.c
25884@@ -61,7 +61,11 @@ void leave_mm(int cpu)
25885 BUG();
25886 cpumask_clear_cpu(cpu,
25887 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25888+
25889+#ifndef CONFIG_PAX_PER_CPU_PGD
25890 load_cr3(swapper_pg_dir);
25891+#endif
25892+
25893 }
25894 EXPORT_SYMBOL_GPL(leave_mm);
25895
25896diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25897index 044897b..a195924 100644
25898--- a/arch/x86/oprofile/backtrace.c
25899+++ b/arch/x86/oprofile/backtrace.c
25900@@ -57,7 +57,7 @@ static struct frame_head *dump_user_backtrace(struct frame_head *head)
25901 struct frame_head bufhead[2];
25902
25903 /* Also check accessibility of one struct frame_head beyond */
25904- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
25905+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
25906 return NULL;
25907 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
25908 return NULL;
25909@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25910 {
25911 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
25912
25913- if (!user_mode_vm(regs)) {
25914+ if (!user_mode(regs)) {
25915 unsigned long stack = kernel_stack_pointer(regs);
25916 if (depth)
25917 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25918diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
25919index e6a160a..36deff6 100644
25920--- a/arch/x86/oprofile/op_model_p4.c
25921+++ b/arch/x86/oprofile/op_model_p4.c
25922@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
25923 #endif
25924 }
25925
25926-static int inline addr_increment(void)
25927+static inline int addr_increment(void)
25928 {
25929 #ifdef CONFIG_SMP
25930 return smp_num_siblings == 2 ? 2 : 1;
25931diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
25932index 1331fcf..03901b2 100644
25933--- a/arch/x86/pci/common.c
25934+++ b/arch/x86/pci/common.c
25935@@ -31,8 +31,8 @@ int noioapicreroute = 1;
25936 int pcibios_last_bus = -1;
25937 unsigned long pirq_table_addr;
25938 struct pci_bus *pci_root_bus;
25939-struct pci_raw_ops *raw_pci_ops;
25940-struct pci_raw_ops *raw_pci_ext_ops;
25941+const struct pci_raw_ops *raw_pci_ops;
25942+const struct pci_raw_ops *raw_pci_ext_ops;
25943
25944 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
25945 int reg, int len, u32 *val)
25946diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
25947index 347d882..4baf6b6 100644
25948--- a/arch/x86/pci/direct.c
25949+++ b/arch/x86/pci/direct.c
25950@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
25951
25952 #undef PCI_CONF1_ADDRESS
25953
25954-struct pci_raw_ops pci_direct_conf1 = {
25955+const struct pci_raw_ops pci_direct_conf1 = {
25956 .read = pci_conf1_read,
25957 .write = pci_conf1_write,
25958 };
25959@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
25960
25961 #undef PCI_CONF2_ADDRESS
25962
25963-struct pci_raw_ops pci_direct_conf2 = {
25964+const struct pci_raw_ops pci_direct_conf2 = {
25965 .read = pci_conf2_read,
25966 .write = pci_conf2_write,
25967 };
25968@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
25969 * This should be close to trivial, but it isn't, because there are buggy
25970 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
25971 */
25972-static int __init pci_sanity_check(struct pci_raw_ops *o)
25973+static int __init pci_sanity_check(const struct pci_raw_ops *o)
25974 {
25975 u32 x = 0;
25976 int year, devfn;
25977diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
25978index f10a7e9..0425342 100644
25979--- a/arch/x86/pci/mmconfig_32.c
25980+++ b/arch/x86/pci/mmconfig_32.c
25981@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
25982 return 0;
25983 }
25984
25985-static struct pci_raw_ops pci_mmcfg = {
25986+static const struct pci_raw_ops pci_mmcfg = {
25987 .read = pci_mmcfg_read,
25988 .write = pci_mmcfg_write,
25989 };
25990diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
25991index 94349f8..41600a7 100644
25992--- a/arch/x86/pci/mmconfig_64.c
25993+++ b/arch/x86/pci/mmconfig_64.c
25994@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
25995 return 0;
25996 }
25997
25998-static struct pci_raw_ops pci_mmcfg = {
25999+static const struct pci_raw_ops pci_mmcfg = {
26000 .read = pci_mmcfg_read,
26001 .write = pci_mmcfg_write,
26002 };
26003diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26004index 8eb295e..86bd657 100644
26005--- a/arch/x86/pci/numaq_32.c
26006+++ b/arch/x86/pci/numaq_32.c
26007@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26008
26009 #undef PCI_CONF1_MQ_ADDRESS
26010
26011-static struct pci_raw_ops pci_direct_conf1_mq = {
26012+static const struct pci_raw_ops pci_direct_conf1_mq = {
26013 .read = pci_conf1_mq_read,
26014 .write = pci_conf1_mq_write
26015 };
26016diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26017index b889d82..5a58a0a 100644
26018--- a/arch/x86/pci/olpc.c
26019+++ b/arch/x86/pci/olpc.c
26020@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26021 return 0;
26022 }
26023
26024-static struct pci_raw_ops pci_olpc_conf = {
26025+static const struct pci_raw_ops pci_olpc_conf = {
26026 .read = pci_olpc_read,
26027 .write = pci_olpc_write,
26028 };
26029diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26030index 1c975cc..ffd0536 100644
26031--- a/arch/x86/pci/pcbios.c
26032+++ b/arch/x86/pci/pcbios.c
26033@@ -56,50 +56,93 @@ union bios32 {
26034 static struct {
26035 unsigned long address;
26036 unsigned short segment;
26037-} bios32_indirect = { 0, __KERNEL_CS };
26038+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26039
26040 /*
26041 * Returns the entry point for the given service, NULL on error
26042 */
26043
26044-static unsigned long bios32_service(unsigned long service)
26045+static unsigned long __devinit bios32_service(unsigned long service)
26046 {
26047 unsigned char return_code; /* %al */
26048 unsigned long address; /* %ebx */
26049 unsigned long length; /* %ecx */
26050 unsigned long entry; /* %edx */
26051 unsigned long flags;
26052+ struct desc_struct d, *gdt;
26053
26054 local_irq_save(flags);
26055- __asm__("lcall *(%%edi); cld"
26056+
26057+ gdt = get_cpu_gdt_table(smp_processor_id());
26058+
26059+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26060+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26061+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26062+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26063+
26064+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26065 : "=a" (return_code),
26066 "=b" (address),
26067 "=c" (length),
26068 "=d" (entry)
26069 : "0" (service),
26070 "1" (0),
26071- "D" (&bios32_indirect));
26072+ "D" (&bios32_indirect),
26073+ "r"(__PCIBIOS_DS)
26074+ : "memory");
26075+
26076+ pax_open_kernel();
26077+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26078+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26079+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26080+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26081+ pax_close_kernel();
26082+
26083 local_irq_restore(flags);
26084
26085 switch (return_code) {
26086- case 0:
26087- return address + entry;
26088- case 0x80: /* Not present */
26089- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26090- return 0;
26091- default: /* Shouldn't happen */
26092- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26093- service, return_code);
26094+ case 0: {
26095+ int cpu;
26096+ unsigned char flags;
26097+
26098+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26099+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26100+ printk(KERN_WARNING "bios32_service: not valid\n");
26101 return 0;
26102+ }
26103+ address = address + PAGE_OFFSET;
26104+ length += 16UL; /* some BIOSs underreport this... */
26105+ flags = 4;
26106+ if (length >= 64*1024*1024) {
26107+ length >>= PAGE_SHIFT;
26108+ flags |= 8;
26109+ }
26110+
26111+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
26112+ gdt = get_cpu_gdt_table(cpu);
26113+ pack_descriptor(&d, address, length, 0x9b, flags);
26114+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26115+ pack_descriptor(&d, address, length, 0x93, flags);
26116+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26117+ }
26118+ return entry;
26119+ }
26120+ case 0x80: /* Not present */
26121+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26122+ return 0;
26123+ default: /* Shouldn't happen */
26124+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26125+ service, return_code);
26126+ return 0;
26127 }
26128 }
26129
26130 static struct {
26131 unsigned long address;
26132 unsigned short segment;
26133-} pci_indirect = { 0, __KERNEL_CS };
26134+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26135
26136-static int pci_bios_present;
26137+static int pci_bios_present __read_only;
26138
26139 static int __devinit check_pcibios(void)
26140 {
26141@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26142 unsigned long flags, pcibios_entry;
26143
26144 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26145- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26146+ pci_indirect.address = pcibios_entry;
26147
26148 local_irq_save(flags);
26149- __asm__(
26150- "lcall *(%%edi); cld\n\t"
26151+ __asm__("movw %w6, %%ds\n\t"
26152+ "lcall *%%ss:(%%edi); cld\n\t"
26153+ "push %%ss\n\t"
26154+ "pop %%ds\n\t"
26155 "jc 1f\n\t"
26156 "xor %%ah, %%ah\n"
26157 "1:"
26158@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26159 "=b" (ebx),
26160 "=c" (ecx)
26161 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26162- "D" (&pci_indirect)
26163+ "D" (&pci_indirect),
26164+ "r" (__PCIBIOS_DS)
26165 : "memory");
26166 local_irq_restore(flags);
26167
26168@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26169
26170 switch (len) {
26171 case 1:
26172- __asm__("lcall *(%%esi); cld\n\t"
26173+ __asm__("movw %w6, %%ds\n\t"
26174+ "lcall *%%ss:(%%esi); cld\n\t"
26175+ "push %%ss\n\t"
26176+ "pop %%ds\n\t"
26177 "jc 1f\n\t"
26178 "xor %%ah, %%ah\n"
26179 "1:"
26180@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26181 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26182 "b" (bx),
26183 "D" ((long)reg),
26184- "S" (&pci_indirect));
26185+ "S" (&pci_indirect),
26186+ "r" (__PCIBIOS_DS));
26187 /*
26188 * Zero-extend the result beyond 8 bits, do not trust the
26189 * BIOS having done it:
26190@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26191 *value &= 0xff;
26192 break;
26193 case 2:
26194- __asm__("lcall *(%%esi); cld\n\t"
26195+ __asm__("movw %w6, %%ds\n\t"
26196+ "lcall *%%ss:(%%esi); cld\n\t"
26197+ "push %%ss\n\t"
26198+ "pop %%ds\n\t"
26199 "jc 1f\n\t"
26200 "xor %%ah, %%ah\n"
26201 "1:"
26202@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26203 : "1" (PCIBIOS_READ_CONFIG_WORD),
26204 "b" (bx),
26205 "D" ((long)reg),
26206- "S" (&pci_indirect));
26207+ "S" (&pci_indirect),
26208+ "r" (__PCIBIOS_DS));
26209 /*
26210 * Zero-extend the result beyond 16 bits, do not trust the
26211 * BIOS having done it:
26212@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26213 *value &= 0xffff;
26214 break;
26215 case 4:
26216- __asm__("lcall *(%%esi); cld\n\t"
26217+ __asm__("movw %w6, %%ds\n\t"
26218+ "lcall *%%ss:(%%esi); cld\n\t"
26219+ "push %%ss\n\t"
26220+ "pop %%ds\n\t"
26221 "jc 1f\n\t"
26222 "xor %%ah, %%ah\n"
26223 "1:"
26224@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26225 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26226 "b" (bx),
26227 "D" ((long)reg),
26228- "S" (&pci_indirect));
26229+ "S" (&pci_indirect),
26230+ "r" (__PCIBIOS_DS));
26231 break;
26232 }
26233
26234@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26235
26236 switch (len) {
26237 case 1:
26238- __asm__("lcall *(%%esi); cld\n\t"
26239+ __asm__("movw %w6, %%ds\n\t"
26240+ "lcall *%%ss:(%%esi); cld\n\t"
26241+ "push %%ss\n\t"
26242+ "pop %%ds\n\t"
26243 "jc 1f\n\t"
26244 "xor %%ah, %%ah\n"
26245 "1:"
26246@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26247 "c" (value),
26248 "b" (bx),
26249 "D" ((long)reg),
26250- "S" (&pci_indirect));
26251+ "S" (&pci_indirect),
26252+ "r" (__PCIBIOS_DS));
26253 break;
26254 case 2:
26255- __asm__("lcall *(%%esi); cld\n\t"
26256+ __asm__("movw %w6, %%ds\n\t"
26257+ "lcall *%%ss:(%%esi); cld\n\t"
26258+ "push %%ss\n\t"
26259+ "pop %%ds\n\t"
26260 "jc 1f\n\t"
26261 "xor %%ah, %%ah\n"
26262 "1:"
26263@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26264 "c" (value),
26265 "b" (bx),
26266 "D" ((long)reg),
26267- "S" (&pci_indirect));
26268+ "S" (&pci_indirect),
26269+ "r" (__PCIBIOS_DS));
26270 break;
26271 case 4:
26272- __asm__("lcall *(%%esi); cld\n\t"
26273+ __asm__("movw %w6, %%ds\n\t"
26274+ "lcall *%%ss:(%%esi); cld\n\t"
26275+ "push %%ss\n\t"
26276+ "pop %%ds\n\t"
26277 "jc 1f\n\t"
26278 "xor %%ah, %%ah\n"
26279 "1:"
26280@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26281 "c" (value),
26282 "b" (bx),
26283 "D" ((long)reg),
26284- "S" (&pci_indirect));
26285+ "S" (&pci_indirect),
26286+ "r" (__PCIBIOS_DS));
26287 break;
26288 }
26289
26290@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26291 * Function table for BIOS32 access
26292 */
26293
26294-static struct pci_raw_ops pci_bios_access = {
26295+static const struct pci_raw_ops pci_bios_access = {
26296 .read = pci_bios_read,
26297 .write = pci_bios_write
26298 };
26299@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26300 * Try to find PCI BIOS.
26301 */
26302
26303-static struct pci_raw_ops * __devinit pci_find_bios(void)
26304+static const struct pci_raw_ops * __devinit pci_find_bios(void)
26305 {
26306 union bios32 *check;
26307 unsigned char sum;
26308@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26309
26310 DBG("PCI: Fetching IRQ routing table... ");
26311 __asm__("push %%es\n\t"
26312+ "movw %w8, %%ds\n\t"
26313 "push %%ds\n\t"
26314 "pop %%es\n\t"
26315- "lcall *(%%esi); cld\n\t"
26316+ "lcall *%%ss:(%%esi); cld\n\t"
26317 "pop %%es\n\t"
26318+ "push %%ss\n\t"
26319+ "pop %%ds\n"
26320 "jc 1f\n\t"
26321 "xor %%ah, %%ah\n"
26322 "1:"
26323@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26324 "1" (0),
26325 "D" ((long) &opt),
26326 "S" (&pci_indirect),
26327- "m" (opt)
26328+ "m" (opt),
26329+ "r" (__PCIBIOS_DS)
26330 : "memory");
26331 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26332 if (ret & 0xff00)
26333@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26334 {
26335 int ret;
26336
26337- __asm__("lcall *(%%esi); cld\n\t"
26338+ __asm__("movw %w5, %%ds\n\t"
26339+ "lcall *%%ss:(%%esi); cld\n\t"
26340+ "push %%ss\n\t"
26341+ "pop %%ds\n"
26342 "jc 1f\n\t"
26343 "xor %%ah, %%ah\n"
26344 "1:"
26345@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26346 : "0" (PCIBIOS_SET_PCI_HW_INT),
26347 "b" ((dev->bus->number << 8) | dev->devfn),
26348 "c" ((irq << 8) | (pin + 10)),
26349- "S" (&pci_indirect));
26350+ "S" (&pci_indirect),
26351+ "r" (__PCIBIOS_DS));
26352 return !(ret & 0xff00);
26353 }
26354 EXPORT_SYMBOL(pcibios_set_irq_routing);
26355diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26356index fa0f651..9d8f3d9 100644
26357--- a/arch/x86/power/cpu.c
26358+++ b/arch/x86/power/cpu.c
26359@@ -129,7 +129,7 @@ static void do_fpu_end(void)
26360 static void fix_processor_context(void)
26361 {
26362 int cpu = smp_processor_id();
26363- struct tss_struct *t = &per_cpu(init_tss, cpu);
26364+ struct tss_struct *t = init_tss + cpu;
26365
26366 set_tss_desc(cpu, t); /*
26367 * This just modifies memory; should not be
26368@@ -139,7 +139,9 @@ static void fix_processor_context(void)
26369 */
26370
26371 #ifdef CONFIG_X86_64
26372+ pax_open_kernel();
26373 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26374+ pax_close_kernel();
26375
26376 syscall_init(); /* This sets MSR_*STAR and related */
26377 #endif
26378diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26379index dd78ef6..f9d928d 100644
26380--- a/arch/x86/vdso/Makefile
26381+++ b/arch/x86/vdso/Makefile
26382@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26383 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26384 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26385
26386-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26387+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26388 GCOV_PROFILE := n
26389
26390 #
26391diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26392index ee55754..0013b2e 100644
26393--- a/arch/x86/vdso/vclock_gettime.c
26394+++ b/arch/x86/vdso/vclock_gettime.c
26395@@ -22,24 +22,48 @@
26396 #include <asm/hpet.h>
26397 #include <asm/unistd.h>
26398 #include <asm/io.h>
26399+#include <asm/fixmap.h>
26400 #include "vextern.h"
26401
26402 #define gtod vdso_vsyscall_gtod_data
26403
26404+notrace noinline long __vdso_fallback_time(long *t)
26405+{
26406+ long secs;
26407+ asm volatile("syscall"
26408+ : "=a" (secs)
26409+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26410+ return secs;
26411+}
26412+
26413 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26414 {
26415 long ret;
26416 asm("syscall" : "=a" (ret) :
26417- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26418+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26419 return ret;
26420 }
26421
26422+notrace static inline cycle_t __vdso_vread_hpet(void)
26423+{
26424+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26425+}
26426+
26427+notrace static inline cycle_t __vdso_vread_tsc(void)
26428+{
26429+ cycle_t ret = (cycle_t)vget_cycles();
26430+
26431+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26432+}
26433+
26434 notrace static inline long vgetns(void)
26435 {
26436 long v;
26437- cycles_t (*vread)(void);
26438- vread = gtod->clock.vread;
26439- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26440+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26441+ v = __vdso_vread_tsc();
26442+ else
26443+ v = __vdso_vread_hpet();
26444+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26445 return (v * gtod->clock.mult) >> gtod->clock.shift;
26446 }
26447
26448@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26449
26450 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26451 {
26452- if (likely(gtod->sysctl_enabled))
26453+ if (likely(gtod->sysctl_enabled &&
26454+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26455+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26456 switch (clock) {
26457 case CLOCK_REALTIME:
26458 if (likely(gtod->clock.vread))
26459@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26460 int clock_gettime(clockid_t, struct timespec *)
26461 __attribute__((weak, alias("__vdso_clock_gettime")));
26462
26463-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26464+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26465 {
26466 long ret;
26467- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26468+ asm("syscall" : "=a" (ret) :
26469+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26470+ return ret;
26471+}
26472+
26473+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26474+{
26475+ if (likely(gtod->sysctl_enabled &&
26476+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26477+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26478+ {
26479 if (likely(tv != NULL)) {
26480 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26481 offsetof(struct timespec, tv_nsec) ||
26482@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26483 }
26484 return 0;
26485 }
26486- asm("syscall" : "=a" (ret) :
26487- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26488- return ret;
26489+ return __vdso_fallback_gettimeofday(tv, tz);
26490 }
26491 int gettimeofday(struct timeval *, struct timezone *)
26492 __attribute__((weak, alias("__vdso_gettimeofday")));
26493diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26494index 4e5dd3b..00ba15e 100644
26495--- a/arch/x86/vdso/vdso.lds.S
26496+++ b/arch/x86/vdso/vdso.lds.S
26497@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26498 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26499 #include "vextern.h"
26500 #undef VEXTERN
26501+
26502+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26503+VEXTERN(fallback_gettimeofday)
26504+VEXTERN(fallback_time)
26505+VEXTERN(getcpu)
26506+#undef VEXTERN
26507diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26508index 58bc00f..d53fb48 100644
26509--- a/arch/x86/vdso/vdso32-setup.c
26510+++ b/arch/x86/vdso/vdso32-setup.c
26511@@ -25,6 +25,7 @@
26512 #include <asm/tlbflush.h>
26513 #include <asm/vdso.h>
26514 #include <asm/proto.h>
26515+#include <asm/mman.h>
26516
26517 enum {
26518 VDSO_DISABLED = 0,
26519@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26520 void enable_sep_cpu(void)
26521 {
26522 int cpu = get_cpu();
26523- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26524+ struct tss_struct *tss = init_tss + cpu;
26525
26526 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26527 put_cpu();
26528@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26529 gate_vma.vm_start = FIXADDR_USER_START;
26530 gate_vma.vm_end = FIXADDR_USER_END;
26531 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26532- gate_vma.vm_page_prot = __P101;
26533+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26534 /*
26535 * Make sure the vDSO gets into every core dump.
26536 * Dumping its contents makes post-mortem fully interpretable later
26537@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26538 if (compat)
26539 addr = VDSO_HIGH_BASE;
26540 else {
26541- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26542+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26543 if (IS_ERR_VALUE(addr)) {
26544 ret = addr;
26545 goto up_fail;
26546 }
26547 }
26548
26549- current->mm->context.vdso = (void *)addr;
26550+ current->mm->context.vdso = addr;
26551
26552 if (compat_uses_vma || !compat) {
26553 /*
26554@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26555 }
26556
26557 current_thread_info()->sysenter_return =
26558- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26559+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26560
26561 up_fail:
26562 if (ret)
26563- current->mm->context.vdso = NULL;
26564+ current->mm->context.vdso = 0;
26565
26566 up_write(&mm->mmap_sem);
26567
26568@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26569
26570 const char *arch_vma_name(struct vm_area_struct *vma)
26571 {
26572- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26573+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26574 return "[vdso]";
26575+
26576+#ifdef CONFIG_PAX_SEGMEXEC
26577+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26578+ return "[vdso]";
26579+#endif
26580+
26581 return NULL;
26582 }
26583
26584@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26585 struct mm_struct *mm = tsk->mm;
26586
26587 /* Check to see if this task was created in compat vdso mode */
26588- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26589+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26590 return &gate_vma;
26591 return NULL;
26592 }
26593diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26594index 1683ba2..48d07f3 100644
26595--- a/arch/x86/vdso/vextern.h
26596+++ b/arch/x86/vdso/vextern.h
26597@@ -11,6 +11,5 @@
26598 put into vextern.h and be referenced as a pointer with vdso prefix.
26599 The main kernel later fills in the values. */
26600
26601-VEXTERN(jiffies)
26602 VEXTERN(vgetcpu_mode)
26603 VEXTERN(vsyscall_gtod_data)
26604diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26605index 21e1aeb..2c0b3c4 100644
26606--- a/arch/x86/vdso/vma.c
26607+++ b/arch/x86/vdso/vma.c
26608@@ -17,8 +17,6 @@
26609 #include "vextern.h" /* Just for VMAGIC. */
26610 #undef VEXTERN
26611
26612-unsigned int __read_mostly vdso_enabled = 1;
26613-
26614 extern char vdso_start[], vdso_end[];
26615 extern unsigned short vdso_sync_cpuid;
26616
26617@@ -27,10 +25,8 @@ static unsigned vdso_size;
26618
26619 static inline void *var_ref(void *p, char *name)
26620 {
26621- if (*(void **)p != (void *)VMAGIC) {
26622- printk("VDSO: variable %s broken\n", name);
26623- vdso_enabled = 0;
26624- }
26625+ if (*(void **)p != (void *)VMAGIC)
26626+ panic("VDSO: variable %s broken\n", name);
26627 return p;
26628 }
26629
26630@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26631 if (!vbase)
26632 goto oom;
26633
26634- if (memcmp(vbase, "\177ELF", 4)) {
26635- printk("VDSO: I'm broken; not ELF\n");
26636- vdso_enabled = 0;
26637- }
26638+ if (memcmp(vbase, ELFMAG, SELFMAG))
26639+ panic("VDSO: I'm broken; not ELF\n");
26640
26641 #define VEXTERN(x) \
26642 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26643 #include "vextern.h"
26644 #undef VEXTERN
26645+ vunmap(vbase);
26646 return 0;
26647
26648 oom:
26649- printk("Cannot allocate vdso\n");
26650- vdso_enabled = 0;
26651- return -ENOMEM;
26652+ panic("Cannot allocate vdso\n");
26653 }
26654 __initcall(init_vdso_vars);
26655
26656@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26657 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26658 {
26659 struct mm_struct *mm = current->mm;
26660- unsigned long addr;
26661+ unsigned long addr = 0;
26662 int ret;
26663
26664- if (!vdso_enabled)
26665- return 0;
26666-
26667 down_write(&mm->mmap_sem);
26668+
26669+#ifdef CONFIG_PAX_RANDMMAP
26670+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26671+#endif
26672+
26673 addr = vdso_addr(mm->start_stack, vdso_size);
26674 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26675 if (IS_ERR_VALUE(addr)) {
26676@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26677 goto up_fail;
26678 }
26679
26680- current->mm->context.vdso = (void *)addr;
26681+ current->mm->context.vdso = addr;
26682
26683 ret = install_special_mapping(mm, addr, vdso_size,
26684 VM_READ|VM_EXEC|
26685@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26686 VM_ALWAYSDUMP,
26687 vdso_pages);
26688 if (ret) {
26689- current->mm->context.vdso = NULL;
26690+ current->mm->context.vdso = 0;
26691 goto up_fail;
26692 }
26693
26694@@ -132,10 +127,3 @@ up_fail:
26695 up_write(&mm->mmap_sem);
26696 return ret;
26697 }
26698-
26699-static __init int vdso_setup(char *s)
26700-{
26701- vdso_enabled = simple_strtoul(s, NULL, 0);
26702- return 0;
26703-}
26704-__setup("vdso=", vdso_setup);
26705diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26706index 0087b00..eecb34f 100644
26707--- a/arch/x86/xen/enlighten.c
26708+++ b/arch/x86/xen/enlighten.c
26709@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26710
26711 struct shared_info xen_dummy_shared_info;
26712
26713-void *xen_initial_gdt;
26714-
26715 /*
26716 * Point at some empty memory to start with. We map the real shared_info
26717 * page as soon as fixmap is up and running.
26718@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26719
26720 preempt_disable();
26721
26722- start = __get_cpu_var(idt_desc).address;
26723+ start = (unsigned long)__get_cpu_var(idt_desc).address;
26724 end = start + __get_cpu_var(idt_desc).size + 1;
26725
26726 xen_mc_flush();
26727@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26728 #endif
26729 };
26730
26731-static void xen_reboot(int reason)
26732+static __noreturn void xen_reboot(int reason)
26733 {
26734 struct sched_shutdown r = { .reason = reason };
26735
26736@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26737 BUG();
26738 }
26739
26740-static void xen_restart(char *msg)
26741+static __noreturn void xen_restart(char *msg)
26742 {
26743 xen_reboot(SHUTDOWN_reboot);
26744 }
26745
26746-static void xen_emergency_restart(void)
26747+static __noreturn void xen_emergency_restart(void)
26748 {
26749 xen_reboot(SHUTDOWN_reboot);
26750 }
26751
26752-static void xen_machine_halt(void)
26753+static __noreturn void xen_machine_halt(void)
26754 {
26755 xen_reboot(SHUTDOWN_poweroff);
26756 }
26757@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26758 */
26759 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26760
26761-#ifdef CONFIG_X86_64
26762 /* Work out if we support NX */
26763- check_efer();
26764+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26765+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26766+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26767+ unsigned l, h;
26768+
26769+#ifdef CONFIG_X86_PAE
26770+ nx_enabled = 1;
26771+#endif
26772+ __supported_pte_mask |= _PAGE_NX;
26773+ rdmsr(MSR_EFER, l, h);
26774+ l |= EFER_NX;
26775+ wrmsr(MSR_EFER, l, h);
26776+ }
26777 #endif
26778
26779 xen_setup_features();
26780@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26781
26782 machine_ops = xen_machine_ops;
26783
26784- /*
26785- * The only reliable way to retain the initial address of the
26786- * percpu gdt_page is to remember it here, so we can go and
26787- * mark it RW later, when the initial percpu area is freed.
26788- */
26789- xen_initial_gdt = &per_cpu(gdt_page, 0);
26790-
26791 xen_smp_init();
26792
26793 pgd = (pgd_t *)xen_start_info->pt_base;
26794diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26795index 3f90a2c..ee0d992 100644
26796--- a/arch/x86/xen/mmu.c
26797+++ b/arch/x86/xen/mmu.c
26798@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26799 convert_pfn_mfn(init_level4_pgt);
26800 convert_pfn_mfn(level3_ident_pgt);
26801 convert_pfn_mfn(level3_kernel_pgt);
26802+ convert_pfn_mfn(level3_vmalloc_pgt);
26803+ convert_pfn_mfn(level3_vmemmap_pgt);
26804
26805 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26806 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26807@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26808 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26809 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26810 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26811+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
26812+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26813 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26814+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26815 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26816 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26817
26818@@ -1860,6 +1865,7 @@ static __init void xen_post_allocator_init(void)
26819 pv_mmu_ops.set_pud = xen_set_pud;
26820 #if PAGETABLE_LEVELS == 4
26821 pv_mmu_ops.set_pgd = xen_set_pgd;
26822+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26823 #endif
26824
26825 /* This will work as long as patching hasn't happened yet
26826@@ -1946,6 +1952,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
26827 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26828 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26829 .set_pgd = xen_set_pgd_hyper,
26830+ .set_pgd_batched = xen_set_pgd_hyper,
26831
26832 .alloc_pud = xen_alloc_pmd_init,
26833 .release_pud = xen_release_pmd_init,
26834diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26835index a96204a..fca9b8e 100644
26836--- a/arch/x86/xen/smp.c
26837+++ b/arch/x86/xen/smp.c
26838@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26839 {
26840 BUG_ON(smp_processor_id() != 0);
26841 native_smp_prepare_boot_cpu();
26842-
26843- /* We've switched to the "real" per-cpu gdt, so make sure the
26844- old memory can be recycled */
26845- make_lowmem_page_readwrite(xen_initial_gdt);
26846-
26847 xen_setup_vcpu_info_placement();
26848 }
26849
26850@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26851 gdt = get_cpu_gdt_table(cpu);
26852
26853 ctxt->flags = VGCF_IN_KERNEL;
26854- ctxt->user_regs.ds = __USER_DS;
26855- ctxt->user_regs.es = __USER_DS;
26856+ ctxt->user_regs.ds = __KERNEL_DS;
26857+ ctxt->user_regs.es = __KERNEL_DS;
26858 ctxt->user_regs.ss = __KERNEL_DS;
26859 #ifdef CONFIG_X86_32
26860 ctxt->user_regs.fs = __KERNEL_PERCPU;
26861- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26862+ savesegment(gs, ctxt->user_regs.gs);
26863 #else
26864 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26865 #endif
26866@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26867 int rc;
26868
26869 per_cpu(current_task, cpu) = idle;
26870+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26871 #ifdef CONFIG_X86_32
26872 irq_ctx_init(cpu);
26873 #else
26874 clear_tsk_thread_flag(idle, TIF_FORK);
26875- per_cpu(kernel_stack, cpu) =
26876- (unsigned long)task_stack_page(idle) -
26877- KERNEL_STACK_OFFSET + THREAD_SIZE;
26878+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26879 #endif
26880 xen_setup_runstate_info(cpu);
26881 xen_setup_timer(cpu);
26882diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26883index 9a95a9c..4f39e774 100644
26884--- a/arch/x86/xen/xen-asm_32.S
26885+++ b/arch/x86/xen/xen-asm_32.S
26886@@ -83,14 +83,14 @@ ENTRY(xen_iret)
26887 ESP_OFFSET=4 # bytes pushed onto stack
26888
26889 /*
26890- * Store vcpu_info pointer for easy access. Do it this way to
26891- * avoid having to reload %fs
26892+ * Store vcpu_info pointer for easy access.
26893 */
26894 #ifdef CONFIG_SMP
26895- GET_THREAD_INFO(%eax)
26896- movl TI_cpu(%eax), %eax
26897- movl __per_cpu_offset(,%eax,4), %eax
26898- mov per_cpu__xen_vcpu(%eax), %eax
26899+ push %fs
26900+ mov $(__KERNEL_PERCPU), %eax
26901+ mov %eax, %fs
26902+ mov PER_CPU_VAR(xen_vcpu), %eax
26903+ pop %fs
26904 #else
26905 movl per_cpu__xen_vcpu, %eax
26906 #endif
26907diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26908index 1a5ff24..a187d40 100644
26909--- a/arch/x86/xen/xen-head.S
26910+++ b/arch/x86/xen/xen-head.S
26911@@ -19,6 +19,17 @@ ENTRY(startup_xen)
26912 #ifdef CONFIG_X86_32
26913 mov %esi,xen_start_info
26914 mov $init_thread_union+THREAD_SIZE,%esp
26915+#ifdef CONFIG_SMP
26916+ movl $cpu_gdt_table,%edi
26917+ movl $__per_cpu_load,%eax
26918+ movw %ax,__KERNEL_PERCPU + 2(%edi)
26919+ rorl $16,%eax
26920+ movb %al,__KERNEL_PERCPU + 4(%edi)
26921+ movb %ah,__KERNEL_PERCPU + 7(%edi)
26922+ movl $__per_cpu_end - 1,%eax
26923+ subl $__per_cpu_start,%eax
26924+ movw %ax,__KERNEL_PERCPU + 0(%edi)
26925+#endif
26926 #else
26927 mov %rsi,xen_start_info
26928 mov $init_thread_union+THREAD_SIZE,%rsp
26929diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26930index f9153a3..51eab3d 100644
26931--- a/arch/x86/xen/xen-ops.h
26932+++ b/arch/x86/xen/xen-ops.h
26933@@ -10,8 +10,6 @@
26934 extern const char xen_hypervisor_callback[];
26935 extern const char xen_failsafe_callback[];
26936
26937-extern void *xen_initial_gdt;
26938-
26939 struct trap_info;
26940 void xen_copy_trap_info(struct trap_info *traps);
26941
26942diff --git a/block/blk-integrity.c b/block/blk-integrity.c
26943index 15c6308..96e83c2 100644
26944--- a/block/blk-integrity.c
26945+++ b/block/blk-integrity.c
26946@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
26947 NULL,
26948 };
26949
26950-static struct sysfs_ops integrity_ops = {
26951+static const struct sysfs_ops integrity_ops = {
26952 .show = &integrity_attr_show,
26953 .store = &integrity_attr_store,
26954 };
26955diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26956index ca56420..f2fc409 100644
26957--- a/block/blk-iopoll.c
26958+++ b/block/blk-iopoll.c
26959@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26960 }
26961 EXPORT_SYMBOL(blk_iopoll_complete);
26962
26963-static void blk_iopoll_softirq(struct softirq_action *h)
26964+static void blk_iopoll_softirq(void)
26965 {
26966 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26967 int rearm = 0, budget = blk_iopoll_budget;
26968diff --git a/block/blk-map.c b/block/blk-map.c
26969index 30a7e51..0aeec6a 100644
26970--- a/block/blk-map.c
26971+++ b/block/blk-map.c
26972@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
26973 * direct dma. else, set up kernel bounce buffers
26974 */
26975 uaddr = (unsigned long) ubuf;
26976- if (blk_rq_aligned(q, ubuf, len) && !map_data)
26977+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
26978 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
26979 else
26980 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
26981@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
26982 for (i = 0; i < iov_count; i++) {
26983 unsigned long uaddr = (unsigned long)iov[i].iov_base;
26984
26985+ if (!iov[i].iov_len)
26986+ return -EINVAL;
26987+
26988 if (uaddr & queue_dma_alignment(q)) {
26989 unaligned = 1;
26990 break;
26991 }
26992- if (!iov[i].iov_len)
26993- return -EINVAL;
26994 }
26995
26996 if (unaligned || (q->dma_pad_mask & len) || map_data)
26997@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26998 if (!len || !kbuf)
26999 return -EINVAL;
27000
27001- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27002+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27003 if (do_copy)
27004 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27005 else
27006diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27007index ee9c216..58d410a 100644
27008--- a/block/blk-softirq.c
27009+++ b/block/blk-softirq.c
27010@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27011 * Softirq action handler - move entries to local list and loop over them
27012 * while passing them to the queue registered handler.
27013 */
27014-static void blk_done_softirq(struct softirq_action *h)
27015+static void blk_done_softirq(void)
27016 {
27017 struct list_head *cpu_list, local_list;
27018
27019diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27020index bb9c5ea..5330d48 100644
27021--- a/block/blk-sysfs.c
27022+++ b/block/blk-sysfs.c
27023@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27024 kmem_cache_free(blk_requestq_cachep, q);
27025 }
27026
27027-static struct sysfs_ops queue_sysfs_ops = {
27028+static const struct sysfs_ops queue_sysfs_ops = {
27029 .show = queue_attr_show,
27030 .store = queue_attr_store,
27031 };
27032diff --git a/block/bsg.c b/block/bsg.c
27033index 7154a7a..08ac2f0 100644
27034--- a/block/bsg.c
27035+++ b/block/bsg.c
27036@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27037 struct sg_io_v4 *hdr, struct bsg_device *bd,
27038 fmode_t has_write_perm)
27039 {
27040+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27041+ unsigned char *cmdptr;
27042+
27043 if (hdr->request_len > BLK_MAX_CDB) {
27044 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27045 if (!rq->cmd)
27046 return -ENOMEM;
27047- }
27048+ cmdptr = rq->cmd;
27049+ } else
27050+ cmdptr = tmpcmd;
27051
27052- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27053+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27054 hdr->request_len))
27055 return -EFAULT;
27056
27057+ if (cmdptr != rq->cmd)
27058+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27059+
27060 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27061 if (blk_verify_command(rq->cmd, has_write_perm))
27062 return -EPERM;
27063@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27064 rq->next_rq = next_rq;
27065 next_rq->cmd_type = rq->cmd_type;
27066
27067- dxferp = (void*)(unsigned long)hdr->din_xferp;
27068+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27069 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27070 hdr->din_xfer_len, GFP_KERNEL);
27071 if (ret)
27072@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27073
27074 if (hdr->dout_xfer_len) {
27075 dxfer_len = hdr->dout_xfer_len;
27076- dxferp = (void*)(unsigned long)hdr->dout_xferp;
27077+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27078 } else if (hdr->din_xfer_len) {
27079 dxfer_len = hdr->din_xfer_len;
27080- dxferp = (void*)(unsigned long)hdr->din_xferp;
27081+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27082 } else
27083 dxfer_len = 0;
27084
27085@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27086 int len = min_t(unsigned int, hdr->max_response_len,
27087 rq->sense_len);
27088
27089- ret = copy_to_user((void*)(unsigned long)hdr->response,
27090+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27091 rq->sense, len);
27092 if (!ret)
27093 hdr->response_len = len;
27094diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27095index 9bd086c..ca1fc22 100644
27096--- a/block/compat_ioctl.c
27097+++ b/block/compat_ioctl.c
27098@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27099 err |= __get_user(f->spec1, &uf->spec1);
27100 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27101 err |= __get_user(name, &uf->name);
27102- f->name = compat_ptr(name);
27103+ f->name = (void __force_kernel *)compat_ptr(name);
27104 if (err) {
27105 err = -EFAULT;
27106 goto out;
27107diff --git a/block/elevator.c b/block/elevator.c
27108index a847046..75a1746 100644
27109--- a/block/elevator.c
27110+++ b/block/elevator.c
27111@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27112 return error;
27113 }
27114
27115-static struct sysfs_ops elv_sysfs_ops = {
27116+static const struct sysfs_ops elv_sysfs_ops = {
27117 .show = elv_attr_show,
27118 .store = elv_attr_store,
27119 };
27120diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27121index 1d5a780..0e2fb8c 100644
27122--- a/block/scsi_ioctl.c
27123+++ b/block/scsi_ioctl.c
27124@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
27125 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27126 struct sg_io_hdr *hdr, fmode_t mode)
27127 {
27128- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27129+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27130+ unsigned char *cmdptr;
27131+
27132+ if (rq->cmd != rq->__cmd)
27133+ cmdptr = rq->cmd;
27134+ else
27135+ cmdptr = tmpcmd;
27136+
27137+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27138 return -EFAULT;
27139+
27140+ if (cmdptr != rq->cmd)
27141+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27142+
27143 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27144 return -EPERM;
27145
27146@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27147 int err;
27148 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27149 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27150+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27151+ unsigned char *cmdptr;
27152
27153 if (!sic)
27154 return -EINVAL;
27155@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27156 */
27157 err = -EFAULT;
27158 rq->cmd_len = cmdlen;
27159- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27160+
27161+ if (rq->cmd != rq->__cmd)
27162+ cmdptr = rq->cmd;
27163+ else
27164+ cmdptr = tmpcmd;
27165+
27166+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27167 goto error;
27168
27169+ if (rq->cmd != cmdptr)
27170+ memcpy(rq->cmd, cmdptr, cmdlen);
27171+
27172 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27173 goto error;
27174
27175diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27176index 3533582..f143117 100644
27177--- a/crypto/cryptd.c
27178+++ b/crypto/cryptd.c
27179@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27180
27181 struct cryptd_blkcipher_request_ctx {
27182 crypto_completion_t complete;
27183-};
27184+} __no_const;
27185
27186 struct cryptd_hash_ctx {
27187 struct crypto_shash *child;
27188diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27189index a90d260..7a9765e 100644
27190--- a/crypto/gf128mul.c
27191+++ b/crypto/gf128mul.c
27192@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27193 for (i = 0; i < 7; ++i)
27194 gf128mul_x_lle(&p[i + 1], &p[i]);
27195
27196- memset(r, 0, sizeof(r));
27197+ memset(r, 0, sizeof(*r));
27198 for (i = 0;;) {
27199 u8 ch = ((u8 *)b)[15 - i];
27200
27201@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27202 for (i = 0; i < 7; ++i)
27203 gf128mul_x_bbe(&p[i + 1], &p[i]);
27204
27205- memset(r, 0, sizeof(r));
27206+ memset(r, 0, sizeof(*r));
27207 for (i = 0;;) {
27208 u8 ch = ((u8 *)b)[i];
27209
27210diff --git a/crypto/serpent.c b/crypto/serpent.c
27211index b651a55..023297d 100644
27212--- a/crypto/serpent.c
27213+++ b/crypto/serpent.c
27214@@ -21,6 +21,7 @@
27215 #include <asm/byteorder.h>
27216 #include <linux/crypto.h>
27217 #include <linux/types.h>
27218+#include <linux/sched.h>
27219
27220 /* Key is padded to the maximum of 256 bits before round key generation.
27221 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27222@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27223 u32 r0,r1,r2,r3,r4;
27224 int i;
27225
27226+ pax_track_stack();
27227+
27228 /* Copy key, add padding */
27229
27230 for (i = 0; i < keylen; ++i)
27231diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27232index 0d2cdb8..d8de48d 100644
27233--- a/drivers/acpi/acpi_pad.c
27234+++ b/drivers/acpi/acpi_pad.c
27235@@ -30,7 +30,7 @@
27236 #include <acpi/acpi_bus.h>
27237 #include <acpi/acpi_drivers.h>
27238
27239-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27240+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27241 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27242 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27243 static DEFINE_MUTEX(isolated_cpus_lock);
27244diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27245index 3f4602b..2e41d36 100644
27246--- a/drivers/acpi/battery.c
27247+++ b/drivers/acpi/battery.c
27248@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27249 }
27250
27251 static struct battery_file {
27252- struct file_operations ops;
27253+ const struct file_operations ops;
27254 mode_t mode;
27255 const char *name;
27256 } acpi_battery_file[] = {
27257diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27258index 7338b6a..82f0257 100644
27259--- a/drivers/acpi/dock.c
27260+++ b/drivers/acpi/dock.c
27261@@ -77,7 +77,7 @@ struct dock_dependent_device {
27262 struct list_head list;
27263 struct list_head hotplug_list;
27264 acpi_handle handle;
27265- struct acpi_dock_ops *ops;
27266+ const struct acpi_dock_ops *ops;
27267 void *context;
27268 };
27269
27270@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27271 * the dock driver after _DCK is executed.
27272 */
27273 int
27274-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27275+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27276 void *context)
27277 {
27278 struct dock_dependent_device *dd;
27279diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27280index 7c1c59e..2993595 100644
27281--- a/drivers/acpi/osl.c
27282+++ b/drivers/acpi/osl.c
27283@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27284 void __iomem *virt_addr;
27285
27286 virt_addr = ioremap(phys_addr, width);
27287+ if (!virt_addr)
27288+ return AE_NO_MEMORY;
27289 if (!value)
27290 value = &dummy;
27291
27292@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27293 void __iomem *virt_addr;
27294
27295 virt_addr = ioremap(phys_addr, width);
27296+ if (!virt_addr)
27297+ return AE_NO_MEMORY;
27298
27299 switch (width) {
27300 case 8:
27301diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27302index c216062..eec10d2 100644
27303--- a/drivers/acpi/power_meter.c
27304+++ b/drivers/acpi/power_meter.c
27305@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27306 return res;
27307
27308 temp /= 1000;
27309- if (temp < 0)
27310- return -EINVAL;
27311
27312 mutex_lock(&resource->lock);
27313 resource->trip[attr->index - 7] = temp;
27314diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27315index d0d25e2..961643d 100644
27316--- a/drivers/acpi/proc.c
27317+++ b/drivers/acpi/proc.c
27318@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27319 size_t count, loff_t * ppos)
27320 {
27321 struct list_head *node, *next;
27322- char strbuf[5];
27323- char str[5] = "";
27324- unsigned int len = count;
27325+ char strbuf[5] = {0};
27326 struct acpi_device *found_dev = NULL;
27327
27328- if (len > 4)
27329- len = 4;
27330- if (len < 0)
27331- return -EFAULT;
27332+ if (count > 4)
27333+ count = 4;
27334
27335- if (copy_from_user(strbuf, buffer, len))
27336+ if (copy_from_user(strbuf, buffer, count))
27337 return -EFAULT;
27338- strbuf[len] = '\0';
27339- sscanf(strbuf, "%s", str);
27340+ strbuf[count] = '\0';
27341
27342 mutex_lock(&acpi_device_lock);
27343 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27344@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27345 if (!dev->wakeup.flags.valid)
27346 continue;
27347
27348- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27349+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27350 dev->wakeup.state.enabled =
27351 dev->wakeup.state.enabled ? 0 : 1;
27352 found_dev = dev;
27353diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27354index 7102474..de8ad22 100644
27355--- a/drivers/acpi/processor_core.c
27356+++ b/drivers/acpi/processor_core.c
27357@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27358 return 0;
27359 }
27360
27361- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27362+ BUG_ON(pr->id >= nr_cpu_ids);
27363
27364 /*
27365 * Buggy BIOS check
27366diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27367index d933980..5761f13 100644
27368--- a/drivers/acpi/sbshc.c
27369+++ b/drivers/acpi/sbshc.c
27370@@ -17,7 +17,7 @@
27371
27372 #define PREFIX "ACPI: "
27373
27374-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27375+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27376 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27377
27378 struct acpi_smb_hc {
27379diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27380index 0458094..6978e7b 100644
27381--- a/drivers/acpi/sleep.c
27382+++ b/drivers/acpi/sleep.c
27383@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27384 }
27385 }
27386
27387-static struct platform_suspend_ops acpi_suspend_ops = {
27388+static const struct platform_suspend_ops acpi_suspend_ops = {
27389 .valid = acpi_suspend_state_valid,
27390 .begin = acpi_suspend_begin,
27391 .prepare_late = acpi_pm_prepare,
27392@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27393 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27394 * been requested.
27395 */
27396-static struct platform_suspend_ops acpi_suspend_ops_old = {
27397+static const struct platform_suspend_ops acpi_suspend_ops_old = {
27398 .valid = acpi_suspend_state_valid,
27399 .begin = acpi_suspend_begin_old,
27400 .prepare_late = acpi_pm_disable_gpes,
27401@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27402 acpi_enable_all_runtime_gpes();
27403 }
27404
27405-static struct platform_hibernation_ops acpi_hibernation_ops = {
27406+static const struct platform_hibernation_ops acpi_hibernation_ops = {
27407 .begin = acpi_hibernation_begin,
27408 .end = acpi_pm_end,
27409 .pre_snapshot = acpi_hibernation_pre_snapshot,
27410@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27411 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27412 * been requested.
27413 */
27414-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27415+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27416 .begin = acpi_hibernation_begin_old,
27417 .end = acpi_pm_end,
27418 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27419diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27420index 05dff63..b662ab7 100644
27421--- a/drivers/acpi/video.c
27422+++ b/drivers/acpi/video.c
27423@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27424 vd->brightness->levels[request_level]);
27425 }
27426
27427-static struct backlight_ops acpi_backlight_ops = {
27428+static const struct backlight_ops acpi_backlight_ops = {
27429 .get_brightness = acpi_video_get_brightness,
27430 .update_status = acpi_video_set_brightness,
27431 };
27432diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27433index 6787aab..23ffb0e 100644
27434--- a/drivers/ata/ahci.c
27435+++ b/drivers/ata/ahci.c
27436@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27437 .sdev_attrs = ahci_sdev_attrs,
27438 };
27439
27440-static struct ata_port_operations ahci_ops = {
27441+static const struct ata_port_operations ahci_ops = {
27442 .inherits = &sata_pmp_port_ops,
27443
27444 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27445@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27446 .port_stop = ahci_port_stop,
27447 };
27448
27449-static struct ata_port_operations ahci_vt8251_ops = {
27450+static const struct ata_port_operations ahci_vt8251_ops = {
27451 .inherits = &ahci_ops,
27452 .hardreset = ahci_vt8251_hardreset,
27453 };
27454
27455-static struct ata_port_operations ahci_p5wdh_ops = {
27456+static const struct ata_port_operations ahci_p5wdh_ops = {
27457 .inherits = &ahci_ops,
27458 .hardreset = ahci_p5wdh_hardreset,
27459 };
27460
27461-static struct ata_port_operations ahci_sb600_ops = {
27462+static const struct ata_port_operations ahci_sb600_ops = {
27463 .inherits = &ahci_ops,
27464 .softreset = ahci_sb600_softreset,
27465 .pmp_softreset = ahci_sb600_softreset,
27466diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27467index 99e7196..4968c77 100644
27468--- a/drivers/ata/ata_generic.c
27469+++ b/drivers/ata/ata_generic.c
27470@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27471 ATA_BMDMA_SHT(DRV_NAME),
27472 };
27473
27474-static struct ata_port_operations generic_port_ops = {
27475+static const struct ata_port_operations generic_port_ops = {
27476 .inherits = &ata_bmdma_port_ops,
27477 .cable_detect = ata_cable_unknown,
27478 .set_mode = generic_set_mode,
27479diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27480index c33591d..000c121 100644
27481--- a/drivers/ata/ata_piix.c
27482+++ b/drivers/ata/ata_piix.c
27483@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27484 ATA_BMDMA_SHT(DRV_NAME),
27485 };
27486
27487-static struct ata_port_operations piix_pata_ops = {
27488+static const struct ata_port_operations piix_pata_ops = {
27489 .inherits = &ata_bmdma32_port_ops,
27490 .cable_detect = ata_cable_40wire,
27491 .set_piomode = piix_set_piomode,
27492@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27493 .prereset = piix_pata_prereset,
27494 };
27495
27496-static struct ata_port_operations piix_vmw_ops = {
27497+static const struct ata_port_operations piix_vmw_ops = {
27498 .inherits = &piix_pata_ops,
27499 .bmdma_status = piix_vmw_bmdma_status,
27500 };
27501
27502-static struct ata_port_operations ich_pata_ops = {
27503+static const struct ata_port_operations ich_pata_ops = {
27504 .inherits = &piix_pata_ops,
27505 .cable_detect = ich_pata_cable_detect,
27506 .set_dmamode = ich_set_dmamode,
27507 };
27508
27509-static struct ata_port_operations piix_sata_ops = {
27510+static const struct ata_port_operations piix_sata_ops = {
27511 .inherits = &ata_bmdma_port_ops,
27512 };
27513
27514-static struct ata_port_operations piix_sidpr_sata_ops = {
27515+static const struct ata_port_operations piix_sidpr_sata_ops = {
27516 .inherits = &piix_sata_ops,
27517 .hardreset = sata_std_hardreset,
27518 .scr_read = piix_sidpr_scr_read,
27519diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27520index b0882cd..c295d65 100644
27521--- a/drivers/ata/libata-acpi.c
27522+++ b/drivers/ata/libata-acpi.c
27523@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27524 ata_acpi_uevent(dev->link->ap, dev, event);
27525 }
27526
27527-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27528+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27529 .handler = ata_acpi_dev_notify_dock,
27530 .uevent = ata_acpi_dev_uevent,
27531 };
27532
27533-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27534+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27535 .handler = ata_acpi_ap_notify_dock,
27536 .uevent = ata_acpi_ap_uevent,
27537 };
27538diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27539index d4f7f99..94f603e 100644
27540--- a/drivers/ata/libata-core.c
27541+++ b/drivers/ata/libata-core.c
27542@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27543 struct ata_port *ap;
27544 unsigned int tag;
27545
27546- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27547+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27548 ap = qc->ap;
27549
27550 qc->flags = 0;
27551@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27552 struct ata_port *ap;
27553 struct ata_link *link;
27554
27555- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27556+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27557 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27558 ap = qc->ap;
27559 link = qc->dev->link;
27560@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27561 * LOCKING:
27562 * None.
27563 */
27564-static void ata_finalize_port_ops(struct ata_port_operations *ops)
27565+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27566 {
27567 static DEFINE_SPINLOCK(lock);
27568 const struct ata_port_operations *cur;
27569@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27570 return;
27571
27572 spin_lock(&lock);
27573+ pax_open_kernel();
27574
27575 for (cur = ops->inherits; cur; cur = cur->inherits) {
27576 void **inherit = (void **)cur;
27577@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27578 if (IS_ERR(*pp))
27579 *pp = NULL;
27580
27581- ops->inherits = NULL;
27582+ *(struct ata_port_operations **)&ops->inherits = NULL;
27583
27584+ pax_close_kernel();
27585 spin_unlock(&lock);
27586 }
27587
27588@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27589 */
27590 /* KILLME - the only user left is ipr */
27591 void ata_host_init(struct ata_host *host, struct device *dev,
27592- unsigned long flags, struct ata_port_operations *ops)
27593+ unsigned long flags, const struct ata_port_operations *ops)
27594 {
27595 spin_lock_init(&host->lock);
27596 host->dev = dev;
27597@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27598 /* truly dummy */
27599 }
27600
27601-struct ata_port_operations ata_dummy_port_ops = {
27602+const struct ata_port_operations ata_dummy_port_ops = {
27603 .qc_prep = ata_noop_qc_prep,
27604 .qc_issue = ata_dummy_qc_issue,
27605 .error_handler = ata_dummy_error_handler,
27606diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27607index e5bdb9b..45a8e72 100644
27608--- a/drivers/ata/libata-eh.c
27609+++ b/drivers/ata/libata-eh.c
27610@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27611 {
27612 struct ata_link *link;
27613
27614+ pax_track_stack();
27615+
27616 ata_for_each_link(link, ap, HOST_FIRST)
27617 ata_eh_link_report(link);
27618 }
27619@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27620 */
27621 void ata_std_error_handler(struct ata_port *ap)
27622 {
27623- struct ata_port_operations *ops = ap->ops;
27624+ const struct ata_port_operations *ops = ap->ops;
27625 ata_reset_fn_t hardreset = ops->hardreset;
27626
27627 /* ignore built-in hardreset if SCR access is not available */
27628diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27629index 51f0ffb..19ce3e3 100644
27630--- a/drivers/ata/libata-pmp.c
27631+++ b/drivers/ata/libata-pmp.c
27632@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27633 */
27634 static int sata_pmp_eh_recover(struct ata_port *ap)
27635 {
27636- struct ata_port_operations *ops = ap->ops;
27637+ const struct ata_port_operations *ops = ap->ops;
27638 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27639 struct ata_link *pmp_link = &ap->link;
27640 struct ata_device *pmp_dev = pmp_link->device;
27641diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27642index d8f35fe..288180a 100644
27643--- a/drivers/ata/pata_acpi.c
27644+++ b/drivers/ata/pata_acpi.c
27645@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27646 ATA_BMDMA_SHT(DRV_NAME),
27647 };
27648
27649-static struct ata_port_operations pacpi_ops = {
27650+static const struct ata_port_operations pacpi_ops = {
27651 .inherits = &ata_bmdma_port_ops,
27652 .qc_issue = pacpi_qc_issue,
27653 .cable_detect = pacpi_cable_detect,
27654diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
27655index 9434114..1f2f364 100644
27656--- a/drivers/ata/pata_ali.c
27657+++ b/drivers/ata/pata_ali.c
27658@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
27659 * Port operations for PIO only ALi
27660 */
27661
27662-static struct ata_port_operations ali_early_port_ops = {
27663+static const struct ata_port_operations ali_early_port_ops = {
27664 .inherits = &ata_sff_port_ops,
27665 .cable_detect = ata_cable_40wire,
27666 .set_piomode = ali_set_piomode,
27667@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
27668 * Port operations for DMA capable ALi without cable
27669 * detect
27670 */
27671-static struct ata_port_operations ali_20_port_ops = {
27672+static const struct ata_port_operations ali_20_port_ops = {
27673 .inherits = &ali_dma_base_ops,
27674 .cable_detect = ata_cable_40wire,
27675 .mode_filter = ali_20_filter,
27676@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
27677 /*
27678 * Port operations for DMA capable ALi with cable detect
27679 */
27680-static struct ata_port_operations ali_c2_port_ops = {
27681+static const struct ata_port_operations ali_c2_port_ops = {
27682 .inherits = &ali_dma_base_ops,
27683 .check_atapi_dma = ali_check_atapi_dma,
27684 .cable_detect = ali_c2_cable_detect,
27685@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
27686 /*
27687 * Port operations for DMA capable ALi with cable detect
27688 */
27689-static struct ata_port_operations ali_c4_port_ops = {
27690+static const struct ata_port_operations ali_c4_port_ops = {
27691 .inherits = &ali_dma_base_ops,
27692 .check_atapi_dma = ali_check_atapi_dma,
27693 .cable_detect = ali_c2_cable_detect,
27694@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
27695 /*
27696 * Port operations for DMA capable ALi with cable detect and LBA48
27697 */
27698-static struct ata_port_operations ali_c5_port_ops = {
27699+static const struct ata_port_operations ali_c5_port_ops = {
27700 .inherits = &ali_dma_base_ops,
27701 .check_atapi_dma = ali_check_atapi_dma,
27702 .dev_config = ali_warn_atapi_dma,
27703diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
27704index 567f3f7..c8ee0da 100644
27705--- a/drivers/ata/pata_amd.c
27706+++ b/drivers/ata/pata_amd.c
27707@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
27708 .prereset = amd_pre_reset,
27709 };
27710
27711-static struct ata_port_operations amd33_port_ops = {
27712+static const struct ata_port_operations amd33_port_ops = {
27713 .inherits = &amd_base_port_ops,
27714 .cable_detect = ata_cable_40wire,
27715 .set_piomode = amd33_set_piomode,
27716 .set_dmamode = amd33_set_dmamode,
27717 };
27718
27719-static struct ata_port_operations amd66_port_ops = {
27720+static const struct ata_port_operations amd66_port_ops = {
27721 .inherits = &amd_base_port_ops,
27722 .cable_detect = ata_cable_unknown,
27723 .set_piomode = amd66_set_piomode,
27724 .set_dmamode = amd66_set_dmamode,
27725 };
27726
27727-static struct ata_port_operations amd100_port_ops = {
27728+static const struct ata_port_operations amd100_port_ops = {
27729 .inherits = &amd_base_port_ops,
27730 .cable_detect = ata_cable_unknown,
27731 .set_piomode = amd100_set_piomode,
27732 .set_dmamode = amd100_set_dmamode,
27733 };
27734
27735-static struct ata_port_operations amd133_port_ops = {
27736+static const struct ata_port_operations amd133_port_ops = {
27737 .inherits = &amd_base_port_ops,
27738 .cable_detect = amd_cable_detect,
27739 .set_piomode = amd133_set_piomode,
27740@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
27741 .host_stop = nv_host_stop,
27742 };
27743
27744-static struct ata_port_operations nv100_port_ops = {
27745+static const struct ata_port_operations nv100_port_ops = {
27746 .inherits = &nv_base_port_ops,
27747 .set_piomode = nv100_set_piomode,
27748 .set_dmamode = nv100_set_dmamode,
27749 };
27750
27751-static struct ata_port_operations nv133_port_ops = {
27752+static const struct ata_port_operations nv133_port_ops = {
27753 .inherits = &nv_base_port_ops,
27754 .set_piomode = nv133_set_piomode,
27755 .set_dmamode = nv133_set_dmamode,
27756diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
27757index d332cfd..4b7eaae 100644
27758--- a/drivers/ata/pata_artop.c
27759+++ b/drivers/ata/pata_artop.c
27760@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
27761 ATA_BMDMA_SHT(DRV_NAME),
27762 };
27763
27764-static struct ata_port_operations artop6210_ops = {
27765+static const struct ata_port_operations artop6210_ops = {
27766 .inherits = &ata_bmdma_port_ops,
27767 .cable_detect = ata_cable_40wire,
27768 .set_piomode = artop6210_set_piomode,
27769@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
27770 .qc_defer = artop6210_qc_defer,
27771 };
27772
27773-static struct ata_port_operations artop6260_ops = {
27774+static const struct ata_port_operations artop6260_ops = {
27775 .inherits = &ata_bmdma_port_ops,
27776 .cable_detect = artop6260_cable_detect,
27777 .set_piomode = artop6260_set_piomode,
27778diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
27779index 5c129f9..7bb7ccb 100644
27780--- a/drivers/ata/pata_at32.c
27781+++ b/drivers/ata/pata_at32.c
27782@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
27783 ATA_PIO_SHT(DRV_NAME),
27784 };
27785
27786-static struct ata_port_operations at32_port_ops = {
27787+static const struct ata_port_operations at32_port_ops = {
27788 .inherits = &ata_sff_port_ops,
27789 .cable_detect = ata_cable_40wire,
27790 .set_piomode = pata_at32_set_piomode,
27791diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
27792index 41c94b1..829006d 100644
27793--- a/drivers/ata/pata_at91.c
27794+++ b/drivers/ata/pata_at91.c
27795@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
27796 ATA_PIO_SHT(DRV_NAME),
27797 };
27798
27799-static struct ata_port_operations pata_at91_port_ops = {
27800+static const struct ata_port_operations pata_at91_port_ops = {
27801 .inherits = &ata_sff_port_ops,
27802
27803 .sff_data_xfer = pata_at91_data_xfer_noirq,
27804diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
27805index ae4454d..d391eb4 100644
27806--- a/drivers/ata/pata_atiixp.c
27807+++ b/drivers/ata/pata_atiixp.c
27808@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
27809 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27810 };
27811
27812-static struct ata_port_operations atiixp_port_ops = {
27813+static const struct ata_port_operations atiixp_port_ops = {
27814 .inherits = &ata_bmdma_port_ops,
27815
27816 .qc_prep = ata_sff_dumb_qc_prep,
27817diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
27818index 6fe7ded..2a425dc 100644
27819--- a/drivers/ata/pata_atp867x.c
27820+++ b/drivers/ata/pata_atp867x.c
27821@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
27822 ATA_BMDMA_SHT(DRV_NAME),
27823 };
27824
27825-static struct ata_port_operations atp867x_ops = {
27826+static const struct ata_port_operations atp867x_ops = {
27827 .inherits = &ata_bmdma_port_ops,
27828 .cable_detect = atp867x_cable_detect,
27829 .set_piomode = atp867x_set_piomode,
27830diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
27831index c4b47a3..b27a367 100644
27832--- a/drivers/ata/pata_bf54x.c
27833+++ b/drivers/ata/pata_bf54x.c
27834@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
27835 .dma_boundary = ATA_DMA_BOUNDARY,
27836 };
27837
27838-static struct ata_port_operations bfin_pata_ops = {
27839+static const struct ata_port_operations bfin_pata_ops = {
27840 .inherits = &ata_sff_port_ops,
27841
27842 .set_piomode = bfin_set_piomode,
27843diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
27844index 5acf9fa..84248be 100644
27845--- a/drivers/ata/pata_cmd640.c
27846+++ b/drivers/ata/pata_cmd640.c
27847@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
27848 ATA_BMDMA_SHT(DRV_NAME),
27849 };
27850
27851-static struct ata_port_operations cmd640_port_ops = {
27852+static const struct ata_port_operations cmd640_port_ops = {
27853 .inherits = &ata_bmdma_port_ops,
27854 /* In theory xfer_noirq is not needed once we kill the prefetcher */
27855 .sff_data_xfer = ata_sff_data_xfer_noirq,
27856diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
27857index ccd2694..c869c3d 100644
27858--- a/drivers/ata/pata_cmd64x.c
27859+++ b/drivers/ata/pata_cmd64x.c
27860@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
27861 .set_dmamode = cmd64x_set_dmamode,
27862 };
27863
27864-static struct ata_port_operations cmd64x_port_ops = {
27865+static const struct ata_port_operations cmd64x_port_ops = {
27866 .inherits = &cmd64x_base_ops,
27867 .cable_detect = ata_cable_40wire,
27868 };
27869
27870-static struct ata_port_operations cmd646r1_port_ops = {
27871+static const struct ata_port_operations cmd646r1_port_ops = {
27872 .inherits = &cmd64x_base_ops,
27873 .bmdma_stop = cmd646r1_bmdma_stop,
27874 .cable_detect = ata_cable_40wire,
27875 };
27876
27877-static struct ata_port_operations cmd648_port_ops = {
27878+static const struct ata_port_operations cmd648_port_ops = {
27879 .inherits = &cmd64x_base_ops,
27880 .bmdma_stop = cmd648_bmdma_stop,
27881 .cable_detect = cmd648_cable_detect,
27882diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
27883index 0df83cf..d7595b0 100644
27884--- a/drivers/ata/pata_cs5520.c
27885+++ b/drivers/ata/pata_cs5520.c
27886@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
27887 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27888 };
27889
27890-static struct ata_port_operations cs5520_port_ops = {
27891+static const struct ata_port_operations cs5520_port_ops = {
27892 .inherits = &ata_bmdma_port_ops,
27893 .qc_prep = ata_sff_dumb_qc_prep,
27894 .cable_detect = ata_cable_40wire,
27895diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
27896index c974b05..6d26b11 100644
27897--- a/drivers/ata/pata_cs5530.c
27898+++ b/drivers/ata/pata_cs5530.c
27899@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
27900 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27901 };
27902
27903-static struct ata_port_operations cs5530_port_ops = {
27904+static const struct ata_port_operations cs5530_port_ops = {
27905 .inherits = &ata_bmdma_port_ops,
27906
27907 .qc_prep = ata_sff_dumb_qc_prep,
27908diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
27909index 403f561..aacd26b 100644
27910--- a/drivers/ata/pata_cs5535.c
27911+++ b/drivers/ata/pata_cs5535.c
27912@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
27913 ATA_BMDMA_SHT(DRV_NAME),
27914 };
27915
27916-static struct ata_port_operations cs5535_port_ops = {
27917+static const struct ata_port_operations cs5535_port_ops = {
27918 .inherits = &ata_bmdma_port_ops,
27919 .cable_detect = cs5535_cable_detect,
27920 .set_piomode = cs5535_set_piomode,
27921diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
27922index 6da4cb4..de24a25 100644
27923--- a/drivers/ata/pata_cs5536.c
27924+++ b/drivers/ata/pata_cs5536.c
27925@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
27926 ATA_BMDMA_SHT(DRV_NAME),
27927 };
27928
27929-static struct ata_port_operations cs5536_port_ops = {
27930+static const struct ata_port_operations cs5536_port_ops = {
27931 .inherits = &ata_bmdma_port_ops,
27932 .cable_detect = cs5536_cable_detect,
27933 .set_piomode = cs5536_set_piomode,
27934diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
27935index 8fb040b..b16a9c9 100644
27936--- a/drivers/ata/pata_cypress.c
27937+++ b/drivers/ata/pata_cypress.c
27938@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
27939 ATA_BMDMA_SHT(DRV_NAME),
27940 };
27941
27942-static struct ata_port_operations cy82c693_port_ops = {
27943+static const struct ata_port_operations cy82c693_port_ops = {
27944 .inherits = &ata_bmdma_port_ops,
27945 .cable_detect = ata_cable_40wire,
27946 .set_piomode = cy82c693_set_piomode,
27947diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
27948index 2a6412f..555ee11 100644
27949--- a/drivers/ata/pata_efar.c
27950+++ b/drivers/ata/pata_efar.c
27951@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
27952 ATA_BMDMA_SHT(DRV_NAME),
27953 };
27954
27955-static struct ata_port_operations efar_ops = {
27956+static const struct ata_port_operations efar_ops = {
27957 .inherits = &ata_bmdma_port_ops,
27958 .cable_detect = efar_cable_detect,
27959 .set_piomode = efar_set_piomode,
27960diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
27961index b9d8836..0b92030 100644
27962--- a/drivers/ata/pata_hpt366.c
27963+++ b/drivers/ata/pata_hpt366.c
27964@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
27965 * Configuration for HPT366/68
27966 */
27967
27968-static struct ata_port_operations hpt366_port_ops = {
27969+static const struct ata_port_operations hpt366_port_ops = {
27970 .inherits = &ata_bmdma_port_ops,
27971 .cable_detect = hpt36x_cable_detect,
27972 .mode_filter = hpt366_filter,
27973diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
27974index 5af7f19..00c4980 100644
27975--- a/drivers/ata/pata_hpt37x.c
27976+++ b/drivers/ata/pata_hpt37x.c
27977@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
27978 * Configuration for HPT370
27979 */
27980
27981-static struct ata_port_operations hpt370_port_ops = {
27982+static const struct ata_port_operations hpt370_port_ops = {
27983 .inherits = &ata_bmdma_port_ops,
27984
27985 .bmdma_stop = hpt370_bmdma_stop,
27986@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
27987 * Configuration for HPT370A. Close to 370 but less filters
27988 */
27989
27990-static struct ata_port_operations hpt370a_port_ops = {
27991+static const struct ata_port_operations hpt370a_port_ops = {
27992 .inherits = &hpt370_port_ops,
27993 .mode_filter = hpt370a_filter,
27994 };
27995@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
27996 * and DMA mode setting functionality.
27997 */
27998
27999-static struct ata_port_operations hpt372_port_ops = {
28000+static const struct ata_port_operations hpt372_port_ops = {
28001 .inherits = &ata_bmdma_port_ops,
28002
28003 .bmdma_stop = hpt37x_bmdma_stop,
28004@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28005 * but we have a different cable detection procedure for function 1.
28006 */
28007
28008-static struct ata_port_operations hpt374_fn1_port_ops = {
28009+static const struct ata_port_operations hpt374_fn1_port_ops = {
28010 .inherits = &hpt372_port_ops,
28011 .prereset = hpt374_fn1_pre_reset,
28012 };
28013diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28014index 100f227..2e39382 100644
28015--- a/drivers/ata/pata_hpt3x2n.c
28016+++ b/drivers/ata/pata_hpt3x2n.c
28017@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28018 * Configuration for HPT3x2n.
28019 */
28020
28021-static struct ata_port_operations hpt3x2n_port_ops = {
28022+static const struct ata_port_operations hpt3x2n_port_ops = {
28023 .inherits = &ata_bmdma_port_ops,
28024
28025 .bmdma_stop = hpt3x2n_bmdma_stop,
28026diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28027index 7e31025..6fca8f4 100644
28028--- a/drivers/ata/pata_hpt3x3.c
28029+++ b/drivers/ata/pata_hpt3x3.c
28030@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28031 ATA_BMDMA_SHT(DRV_NAME),
28032 };
28033
28034-static struct ata_port_operations hpt3x3_port_ops = {
28035+static const struct ata_port_operations hpt3x3_port_ops = {
28036 .inherits = &ata_bmdma_port_ops,
28037 .cable_detect = ata_cable_40wire,
28038 .set_piomode = hpt3x3_set_piomode,
28039diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28040index b663b7f..9a26c2a 100644
28041--- a/drivers/ata/pata_icside.c
28042+++ b/drivers/ata/pata_icside.c
28043@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28044 }
28045 }
28046
28047-static struct ata_port_operations pata_icside_port_ops = {
28048+static const struct ata_port_operations pata_icside_port_ops = {
28049 .inherits = &ata_sff_port_ops,
28050 /* no need to build any PRD tables for DMA */
28051 .qc_prep = ata_noop_qc_prep,
28052diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28053index 4bceb88..457dfb6 100644
28054--- a/drivers/ata/pata_isapnp.c
28055+++ b/drivers/ata/pata_isapnp.c
28056@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28057 ATA_PIO_SHT(DRV_NAME),
28058 };
28059
28060-static struct ata_port_operations isapnp_port_ops = {
28061+static const struct ata_port_operations isapnp_port_ops = {
28062 .inherits = &ata_sff_port_ops,
28063 .cable_detect = ata_cable_40wire,
28064 };
28065
28066-static struct ata_port_operations isapnp_noalt_port_ops = {
28067+static const struct ata_port_operations isapnp_noalt_port_ops = {
28068 .inherits = &ata_sff_port_ops,
28069 .cable_detect = ata_cable_40wire,
28070 /* No altstatus so we don't want to use the lost interrupt poll */
28071diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28072index f156da8..24976e2 100644
28073--- a/drivers/ata/pata_it8213.c
28074+++ b/drivers/ata/pata_it8213.c
28075@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28076 };
28077
28078
28079-static struct ata_port_operations it8213_ops = {
28080+static const struct ata_port_operations it8213_ops = {
28081 .inherits = &ata_bmdma_port_ops,
28082 .cable_detect = it8213_cable_detect,
28083 .set_piomode = it8213_set_piomode,
28084diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28085index 188bc2f..ca9e785 100644
28086--- a/drivers/ata/pata_it821x.c
28087+++ b/drivers/ata/pata_it821x.c
28088@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28089 ATA_BMDMA_SHT(DRV_NAME),
28090 };
28091
28092-static struct ata_port_operations it821x_smart_port_ops = {
28093+static const struct ata_port_operations it821x_smart_port_ops = {
28094 .inherits = &ata_bmdma_port_ops,
28095
28096 .check_atapi_dma= it821x_check_atapi_dma,
28097@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28098 .port_start = it821x_port_start,
28099 };
28100
28101-static struct ata_port_operations it821x_passthru_port_ops = {
28102+static const struct ata_port_operations it821x_passthru_port_ops = {
28103 .inherits = &ata_bmdma_port_ops,
28104
28105 .check_atapi_dma= it821x_check_atapi_dma,
28106@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28107 .port_start = it821x_port_start,
28108 };
28109
28110-static struct ata_port_operations it821x_rdc_port_ops = {
28111+static const struct ata_port_operations it821x_rdc_port_ops = {
28112 .inherits = &ata_bmdma_port_ops,
28113
28114 .check_atapi_dma= it821x_check_atapi_dma,
28115diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28116index ba54b08..4b952b7 100644
28117--- a/drivers/ata/pata_ixp4xx_cf.c
28118+++ b/drivers/ata/pata_ixp4xx_cf.c
28119@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28120 ATA_PIO_SHT(DRV_NAME),
28121 };
28122
28123-static struct ata_port_operations ixp4xx_port_ops = {
28124+static const struct ata_port_operations ixp4xx_port_ops = {
28125 .inherits = &ata_sff_port_ops,
28126 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28127 .cable_detect = ata_cable_40wire,
28128diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28129index 3a1474a..434b0ff 100644
28130--- a/drivers/ata/pata_jmicron.c
28131+++ b/drivers/ata/pata_jmicron.c
28132@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28133 ATA_BMDMA_SHT(DRV_NAME),
28134 };
28135
28136-static struct ata_port_operations jmicron_ops = {
28137+static const struct ata_port_operations jmicron_ops = {
28138 .inherits = &ata_bmdma_port_ops,
28139 .prereset = jmicron_pre_reset,
28140 };
28141diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28142index 6932e56..220e71d 100644
28143--- a/drivers/ata/pata_legacy.c
28144+++ b/drivers/ata/pata_legacy.c
28145@@ -106,7 +106,7 @@ struct legacy_probe {
28146
28147 struct legacy_controller {
28148 const char *name;
28149- struct ata_port_operations *ops;
28150+ const struct ata_port_operations *ops;
28151 unsigned int pio_mask;
28152 unsigned int flags;
28153 unsigned int pflags;
28154@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28155 * pio_mask as well.
28156 */
28157
28158-static struct ata_port_operations simple_port_ops = {
28159+static const struct ata_port_operations simple_port_ops = {
28160 .inherits = &legacy_base_port_ops,
28161 .sff_data_xfer = ata_sff_data_xfer_noirq,
28162 };
28163
28164-static struct ata_port_operations legacy_port_ops = {
28165+static const struct ata_port_operations legacy_port_ops = {
28166 .inherits = &legacy_base_port_ops,
28167 .sff_data_xfer = ata_sff_data_xfer_noirq,
28168 .set_mode = legacy_set_mode,
28169@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28170 return buflen;
28171 }
28172
28173-static struct ata_port_operations pdc20230_port_ops = {
28174+static const struct ata_port_operations pdc20230_port_ops = {
28175 .inherits = &legacy_base_port_ops,
28176 .set_piomode = pdc20230_set_piomode,
28177 .sff_data_xfer = pdc_data_xfer_vlb,
28178@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28179 ioread8(ap->ioaddr.status_addr);
28180 }
28181
28182-static struct ata_port_operations ht6560a_port_ops = {
28183+static const struct ata_port_operations ht6560a_port_ops = {
28184 .inherits = &legacy_base_port_ops,
28185 .set_piomode = ht6560a_set_piomode,
28186 };
28187@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28188 ioread8(ap->ioaddr.status_addr);
28189 }
28190
28191-static struct ata_port_operations ht6560b_port_ops = {
28192+static const struct ata_port_operations ht6560b_port_ops = {
28193 .inherits = &legacy_base_port_ops,
28194 .set_piomode = ht6560b_set_piomode,
28195 };
28196@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28197 }
28198
28199
28200-static struct ata_port_operations opti82c611a_port_ops = {
28201+static const struct ata_port_operations opti82c611a_port_ops = {
28202 .inherits = &legacy_base_port_ops,
28203 .set_piomode = opti82c611a_set_piomode,
28204 };
28205@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28206 return ata_sff_qc_issue(qc);
28207 }
28208
28209-static struct ata_port_operations opti82c46x_port_ops = {
28210+static const struct ata_port_operations opti82c46x_port_ops = {
28211 .inherits = &legacy_base_port_ops,
28212 .set_piomode = opti82c46x_set_piomode,
28213 .qc_issue = opti82c46x_qc_issue,
28214@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28215 return 0;
28216 }
28217
28218-static struct ata_port_operations qdi6500_port_ops = {
28219+static const struct ata_port_operations qdi6500_port_ops = {
28220 .inherits = &legacy_base_port_ops,
28221 .set_piomode = qdi6500_set_piomode,
28222 .qc_issue = qdi_qc_issue,
28223 .sff_data_xfer = vlb32_data_xfer,
28224 };
28225
28226-static struct ata_port_operations qdi6580_port_ops = {
28227+static const struct ata_port_operations qdi6580_port_ops = {
28228 .inherits = &legacy_base_port_ops,
28229 .set_piomode = qdi6580_set_piomode,
28230 .sff_data_xfer = vlb32_data_xfer,
28231 };
28232
28233-static struct ata_port_operations qdi6580dp_port_ops = {
28234+static const struct ata_port_operations qdi6580dp_port_ops = {
28235 .inherits = &legacy_base_port_ops,
28236 .set_piomode = qdi6580dp_set_piomode,
28237 .sff_data_xfer = vlb32_data_xfer,
28238@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28239 return 0;
28240 }
28241
28242-static struct ata_port_operations winbond_port_ops = {
28243+static const struct ata_port_operations winbond_port_ops = {
28244 .inherits = &legacy_base_port_ops,
28245 .set_piomode = winbond_set_piomode,
28246 .sff_data_xfer = vlb32_data_xfer,
28247@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28248 int pio_modes = controller->pio_mask;
28249 unsigned long io = probe->port;
28250 u32 mask = (1 << probe->slot);
28251- struct ata_port_operations *ops = controller->ops;
28252+ const struct ata_port_operations *ops = controller->ops;
28253 struct legacy_data *ld = &legacy_data[probe->slot];
28254 struct ata_host *host = NULL;
28255 struct ata_port *ap;
28256diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28257index 2096fb7..4d090fc 100644
28258--- a/drivers/ata/pata_marvell.c
28259+++ b/drivers/ata/pata_marvell.c
28260@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28261 ATA_BMDMA_SHT(DRV_NAME),
28262 };
28263
28264-static struct ata_port_operations marvell_ops = {
28265+static const struct ata_port_operations marvell_ops = {
28266 .inherits = &ata_bmdma_port_ops,
28267 .cable_detect = marvell_cable_detect,
28268 .prereset = marvell_pre_reset,
28269diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28270index 99d41be..7d56aa8 100644
28271--- a/drivers/ata/pata_mpc52xx.c
28272+++ b/drivers/ata/pata_mpc52xx.c
28273@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28274 ATA_PIO_SHT(DRV_NAME),
28275 };
28276
28277-static struct ata_port_operations mpc52xx_ata_port_ops = {
28278+static const struct ata_port_operations mpc52xx_ata_port_ops = {
28279 .inherits = &ata_bmdma_port_ops,
28280 .sff_dev_select = mpc52xx_ata_dev_select,
28281 .set_piomode = mpc52xx_ata_set_piomode,
28282diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28283index b21f002..0a27e7f 100644
28284--- a/drivers/ata/pata_mpiix.c
28285+++ b/drivers/ata/pata_mpiix.c
28286@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28287 ATA_PIO_SHT(DRV_NAME),
28288 };
28289
28290-static struct ata_port_operations mpiix_port_ops = {
28291+static const struct ata_port_operations mpiix_port_ops = {
28292 .inherits = &ata_sff_port_ops,
28293 .qc_issue = mpiix_qc_issue,
28294 .cable_detect = ata_cable_40wire,
28295diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28296index f0d52f7..89c3be3 100644
28297--- a/drivers/ata/pata_netcell.c
28298+++ b/drivers/ata/pata_netcell.c
28299@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28300 ATA_BMDMA_SHT(DRV_NAME),
28301 };
28302
28303-static struct ata_port_operations netcell_ops = {
28304+static const struct ata_port_operations netcell_ops = {
28305 .inherits = &ata_bmdma_port_ops,
28306 .cable_detect = ata_cable_80wire,
28307 .read_id = netcell_read_id,
28308diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28309index dd53a66..a3f4317 100644
28310--- a/drivers/ata/pata_ninja32.c
28311+++ b/drivers/ata/pata_ninja32.c
28312@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28313 ATA_BMDMA_SHT(DRV_NAME),
28314 };
28315
28316-static struct ata_port_operations ninja32_port_ops = {
28317+static const struct ata_port_operations ninja32_port_ops = {
28318 .inherits = &ata_bmdma_port_ops,
28319 .sff_dev_select = ninja32_dev_select,
28320 .cable_detect = ata_cable_40wire,
28321diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28322index ca53fac..9aa93ef 100644
28323--- a/drivers/ata/pata_ns87410.c
28324+++ b/drivers/ata/pata_ns87410.c
28325@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28326 ATA_PIO_SHT(DRV_NAME),
28327 };
28328
28329-static struct ata_port_operations ns87410_port_ops = {
28330+static const struct ata_port_operations ns87410_port_ops = {
28331 .inherits = &ata_sff_port_ops,
28332 .qc_issue = ns87410_qc_issue,
28333 .cable_detect = ata_cable_40wire,
28334diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28335index 773b159..55f454e 100644
28336--- a/drivers/ata/pata_ns87415.c
28337+++ b/drivers/ata/pata_ns87415.c
28338@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28339 }
28340 #endif /* 87560 SuperIO Support */
28341
28342-static struct ata_port_operations ns87415_pata_ops = {
28343+static const struct ata_port_operations ns87415_pata_ops = {
28344 .inherits = &ata_bmdma_port_ops,
28345
28346 .check_atapi_dma = ns87415_check_atapi_dma,
28347@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28348 };
28349
28350 #if defined(CONFIG_SUPERIO)
28351-static struct ata_port_operations ns87560_pata_ops = {
28352+static const struct ata_port_operations ns87560_pata_ops = {
28353 .inherits = &ns87415_pata_ops,
28354 .sff_tf_read = ns87560_tf_read,
28355 .sff_check_status = ns87560_check_status,
28356diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28357index d6f6956..639295b 100644
28358--- a/drivers/ata/pata_octeon_cf.c
28359+++ b/drivers/ata/pata_octeon_cf.c
28360@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28361 return 0;
28362 }
28363
28364+/* cannot be const */
28365 static struct ata_port_operations octeon_cf_ops = {
28366 .inherits = &ata_sff_port_ops,
28367 .check_atapi_dma = octeon_cf_check_atapi_dma,
28368diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28369index 84ac503..adee1cd 100644
28370--- a/drivers/ata/pata_oldpiix.c
28371+++ b/drivers/ata/pata_oldpiix.c
28372@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28373 ATA_BMDMA_SHT(DRV_NAME),
28374 };
28375
28376-static struct ata_port_operations oldpiix_pata_ops = {
28377+static const struct ata_port_operations oldpiix_pata_ops = {
28378 .inherits = &ata_bmdma_port_ops,
28379 .qc_issue = oldpiix_qc_issue,
28380 .cable_detect = ata_cable_40wire,
28381diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28382index 99eddda..3a4c0aa 100644
28383--- a/drivers/ata/pata_opti.c
28384+++ b/drivers/ata/pata_opti.c
28385@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28386 ATA_PIO_SHT(DRV_NAME),
28387 };
28388
28389-static struct ata_port_operations opti_port_ops = {
28390+static const struct ata_port_operations opti_port_ops = {
28391 .inherits = &ata_sff_port_ops,
28392 .cable_detect = ata_cable_40wire,
28393 .set_piomode = opti_set_piomode,
28394diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28395index 86885a4..8e9968d 100644
28396--- a/drivers/ata/pata_optidma.c
28397+++ b/drivers/ata/pata_optidma.c
28398@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28399 ATA_BMDMA_SHT(DRV_NAME),
28400 };
28401
28402-static struct ata_port_operations optidma_port_ops = {
28403+static const struct ata_port_operations optidma_port_ops = {
28404 .inherits = &ata_bmdma_port_ops,
28405 .cable_detect = ata_cable_40wire,
28406 .set_piomode = optidma_set_pio_mode,
28407@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28408 .prereset = optidma_pre_reset,
28409 };
28410
28411-static struct ata_port_operations optiplus_port_ops = {
28412+static const struct ata_port_operations optiplus_port_ops = {
28413 .inherits = &optidma_port_ops,
28414 .set_piomode = optiplus_set_pio_mode,
28415 .set_dmamode = optiplus_set_dma_mode,
28416diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28417index 11fb4cc..1a14022 100644
28418--- a/drivers/ata/pata_palmld.c
28419+++ b/drivers/ata/pata_palmld.c
28420@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28421 ATA_PIO_SHT(DRV_NAME),
28422 };
28423
28424-static struct ata_port_operations palmld_port_ops = {
28425+static const struct ata_port_operations palmld_port_ops = {
28426 .inherits = &ata_sff_port_ops,
28427 .sff_data_xfer = ata_sff_data_xfer_noirq,
28428 .cable_detect = ata_cable_40wire,
28429diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28430index dc99e26..7f4b1e4 100644
28431--- a/drivers/ata/pata_pcmcia.c
28432+++ b/drivers/ata/pata_pcmcia.c
28433@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28434 ATA_PIO_SHT(DRV_NAME),
28435 };
28436
28437-static struct ata_port_operations pcmcia_port_ops = {
28438+static const struct ata_port_operations pcmcia_port_ops = {
28439 .inherits = &ata_sff_port_ops,
28440 .sff_data_xfer = ata_sff_data_xfer_noirq,
28441 .cable_detect = ata_cable_40wire,
28442 .set_mode = pcmcia_set_mode,
28443 };
28444
28445-static struct ata_port_operations pcmcia_8bit_port_ops = {
28446+static const struct ata_port_operations pcmcia_8bit_port_ops = {
28447 .inherits = &ata_sff_port_ops,
28448 .sff_data_xfer = ata_data_xfer_8bit,
28449 .cable_detect = ata_cable_40wire,
28450@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28451 unsigned long io_base, ctl_base;
28452 void __iomem *io_addr, *ctl_addr;
28453 int n_ports = 1;
28454- struct ata_port_operations *ops = &pcmcia_port_ops;
28455+ const struct ata_port_operations *ops = &pcmcia_port_ops;
28456
28457 info = kzalloc(sizeof(*info), GFP_KERNEL);
28458 if (info == NULL)
28459diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28460index ca5cad0..3a1f125 100644
28461--- a/drivers/ata/pata_pdc2027x.c
28462+++ b/drivers/ata/pata_pdc2027x.c
28463@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28464 ATA_BMDMA_SHT(DRV_NAME),
28465 };
28466
28467-static struct ata_port_operations pdc2027x_pata100_ops = {
28468+static const struct ata_port_operations pdc2027x_pata100_ops = {
28469 .inherits = &ata_bmdma_port_ops,
28470 .check_atapi_dma = pdc2027x_check_atapi_dma,
28471 .cable_detect = pdc2027x_cable_detect,
28472 .prereset = pdc2027x_prereset,
28473 };
28474
28475-static struct ata_port_operations pdc2027x_pata133_ops = {
28476+static const struct ata_port_operations pdc2027x_pata133_ops = {
28477 .inherits = &pdc2027x_pata100_ops,
28478 .mode_filter = pdc2027x_mode_filter,
28479 .set_piomode = pdc2027x_set_piomode,
28480diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28481index 2911120..4bf62aa 100644
28482--- a/drivers/ata/pata_pdc202xx_old.c
28483+++ b/drivers/ata/pata_pdc202xx_old.c
28484@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28485 ATA_BMDMA_SHT(DRV_NAME),
28486 };
28487
28488-static struct ata_port_operations pdc2024x_port_ops = {
28489+static const struct ata_port_operations pdc2024x_port_ops = {
28490 .inherits = &ata_bmdma_port_ops,
28491
28492 .cable_detect = ata_cable_40wire,
28493@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28494 .sff_exec_command = pdc202xx_exec_command,
28495 };
28496
28497-static struct ata_port_operations pdc2026x_port_ops = {
28498+static const struct ata_port_operations pdc2026x_port_ops = {
28499 .inherits = &pdc2024x_port_ops,
28500
28501 .check_atapi_dma = pdc2026x_check_atapi_dma,
28502diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28503index 3f6ebc6..a18c358 100644
28504--- a/drivers/ata/pata_platform.c
28505+++ b/drivers/ata/pata_platform.c
28506@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28507 ATA_PIO_SHT(DRV_NAME),
28508 };
28509
28510-static struct ata_port_operations pata_platform_port_ops = {
28511+static const struct ata_port_operations pata_platform_port_ops = {
28512 .inherits = &ata_sff_port_ops,
28513 .sff_data_xfer = ata_sff_data_xfer_noirq,
28514 .cable_detect = ata_cable_unknown,
28515diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28516index 45879dc..165a9f9 100644
28517--- a/drivers/ata/pata_qdi.c
28518+++ b/drivers/ata/pata_qdi.c
28519@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28520 ATA_PIO_SHT(DRV_NAME),
28521 };
28522
28523-static struct ata_port_operations qdi6500_port_ops = {
28524+static const struct ata_port_operations qdi6500_port_ops = {
28525 .inherits = &ata_sff_port_ops,
28526 .qc_issue = qdi_qc_issue,
28527 .sff_data_xfer = qdi_data_xfer,
28528@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28529 .set_piomode = qdi6500_set_piomode,
28530 };
28531
28532-static struct ata_port_operations qdi6580_port_ops = {
28533+static const struct ata_port_operations qdi6580_port_ops = {
28534 .inherits = &qdi6500_port_ops,
28535 .set_piomode = qdi6580_set_piomode,
28536 };
28537diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28538index 4401b33..716c5cc 100644
28539--- a/drivers/ata/pata_radisys.c
28540+++ b/drivers/ata/pata_radisys.c
28541@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28542 ATA_BMDMA_SHT(DRV_NAME),
28543 };
28544
28545-static struct ata_port_operations radisys_pata_ops = {
28546+static const struct ata_port_operations radisys_pata_ops = {
28547 .inherits = &ata_bmdma_port_ops,
28548 .qc_issue = radisys_qc_issue,
28549 .cable_detect = ata_cable_unknown,
28550diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28551index 45f1e10..fab6bca 100644
28552--- a/drivers/ata/pata_rb532_cf.c
28553+++ b/drivers/ata/pata_rb532_cf.c
28554@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28555 return IRQ_HANDLED;
28556 }
28557
28558-static struct ata_port_operations rb532_pata_port_ops = {
28559+static const struct ata_port_operations rb532_pata_port_ops = {
28560 .inherits = &ata_sff_port_ops,
28561 .sff_data_xfer = ata_sff_data_xfer32,
28562 };
28563diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28564index c843a1e..b5853c3 100644
28565--- a/drivers/ata/pata_rdc.c
28566+++ b/drivers/ata/pata_rdc.c
28567@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28568 pci_write_config_byte(dev, 0x48, udma_enable);
28569 }
28570
28571-static struct ata_port_operations rdc_pata_ops = {
28572+static const struct ata_port_operations rdc_pata_ops = {
28573 .inherits = &ata_bmdma32_port_ops,
28574 .cable_detect = rdc_pata_cable_detect,
28575 .set_piomode = rdc_set_piomode,
28576diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28577index a5e4dfe..080c8c9 100644
28578--- a/drivers/ata/pata_rz1000.c
28579+++ b/drivers/ata/pata_rz1000.c
28580@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28581 ATA_PIO_SHT(DRV_NAME),
28582 };
28583
28584-static struct ata_port_operations rz1000_port_ops = {
28585+static const struct ata_port_operations rz1000_port_ops = {
28586 .inherits = &ata_sff_port_ops,
28587 .cable_detect = ata_cable_40wire,
28588 .set_mode = rz1000_set_mode,
28589diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28590index 3bbed83..e309daf 100644
28591--- a/drivers/ata/pata_sc1200.c
28592+++ b/drivers/ata/pata_sc1200.c
28593@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28594 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28595 };
28596
28597-static struct ata_port_operations sc1200_port_ops = {
28598+static const struct ata_port_operations sc1200_port_ops = {
28599 .inherits = &ata_bmdma_port_ops,
28600 .qc_prep = ata_sff_dumb_qc_prep,
28601 .qc_issue = sc1200_qc_issue,
28602diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28603index 4257d6b..4c1d9d5 100644
28604--- a/drivers/ata/pata_scc.c
28605+++ b/drivers/ata/pata_scc.c
28606@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28607 ATA_BMDMA_SHT(DRV_NAME),
28608 };
28609
28610-static struct ata_port_operations scc_pata_ops = {
28611+static const struct ata_port_operations scc_pata_ops = {
28612 .inherits = &ata_bmdma_port_ops,
28613
28614 .set_piomode = scc_set_piomode,
28615diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28616index 99cceb4..e2e0a87 100644
28617--- a/drivers/ata/pata_sch.c
28618+++ b/drivers/ata/pata_sch.c
28619@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28620 ATA_BMDMA_SHT(DRV_NAME),
28621 };
28622
28623-static struct ata_port_operations sch_pata_ops = {
28624+static const struct ata_port_operations sch_pata_ops = {
28625 .inherits = &ata_bmdma_port_ops,
28626 .cable_detect = ata_cable_unknown,
28627 .set_piomode = sch_set_piomode,
28628diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28629index beaed12..39969f1 100644
28630--- a/drivers/ata/pata_serverworks.c
28631+++ b/drivers/ata/pata_serverworks.c
28632@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28633 ATA_BMDMA_SHT(DRV_NAME),
28634 };
28635
28636-static struct ata_port_operations serverworks_osb4_port_ops = {
28637+static const struct ata_port_operations serverworks_osb4_port_ops = {
28638 .inherits = &ata_bmdma_port_ops,
28639 .cable_detect = serverworks_cable_detect,
28640 .mode_filter = serverworks_osb4_filter,
28641@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28642 .set_dmamode = serverworks_set_dmamode,
28643 };
28644
28645-static struct ata_port_operations serverworks_csb_port_ops = {
28646+static const struct ata_port_operations serverworks_csb_port_ops = {
28647 .inherits = &serverworks_osb4_port_ops,
28648 .mode_filter = serverworks_csb_filter,
28649 };
28650diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28651index a2ace48..0463b44 100644
28652--- a/drivers/ata/pata_sil680.c
28653+++ b/drivers/ata/pata_sil680.c
28654@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
28655 ATA_BMDMA_SHT(DRV_NAME),
28656 };
28657
28658-static struct ata_port_operations sil680_port_ops = {
28659+static const struct ata_port_operations sil680_port_ops = {
28660 .inherits = &ata_bmdma32_port_ops,
28661 .cable_detect = sil680_cable_detect,
28662 .set_piomode = sil680_set_piomode,
28663diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
28664index 488e77b..b3724d5 100644
28665--- a/drivers/ata/pata_sis.c
28666+++ b/drivers/ata/pata_sis.c
28667@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
28668 ATA_BMDMA_SHT(DRV_NAME),
28669 };
28670
28671-static struct ata_port_operations sis_133_for_sata_ops = {
28672+static const struct ata_port_operations sis_133_for_sata_ops = {
28673 .inherits = &ata_bmdma_port_ops,
28674 .set_piomode = sis_133_set_piomode,
28675 .set_dmamode = sis_133_set_dmamode,
28676 .cable_detect = sis_133_cable_detect,
28677 };
28678
28679-static struct ata_port_operations sis_base_ops = {
28680+static const struct ata_port_operations sis_base_ops = {
28681 .inherits = &ata_bmdma_port_ops,
28682 .prereset = sis_pre_reset,
28683 };
28684
28685-static struct ata_port_operations sis_133_ops = {
28686+static const struct ata_port_operations sis_133_ops = {
28687 .inherits = &sis_base_ops,
28688 .set_piomode = sis_133_set_piomode,
28689 .set_dmamode = sis_133_set_dmamode,
28690 .cable_detect = sis_133_cable_detect,
28691 };
28692
28693-static struct ata_port_operations sis_133_early_ops = {
28694+static const struct ata_port_operations sis_133_early_ops = {
28695 .inherits = &sis_base_ops,
28696 .set_piomode = sis_100_set_piomode,
28697 .set_dmamode = sis_133_early_set_dmamode,
28698 .cable_detect = sis_66_cable_detect,
28699 };
28700
28701-static struct ata_port_operations sis_100_ops = {
28702+static const struct ata_port_operations sis_100_ops = {
28703 .inherits = &sis_base_ops,
28704 .set_piomode = sis_100_set_piomode,
28705 .set_dmamode = sis_100_set_dmamode,
28706 .cable_detect = sis_66_cable_detect,
28707 };
28708
28709-static struct ata_port_operations sis_66_ops = {
28710+static const struct ata_port_operations sis_66_ops = {
28711 .inherits = &sis_base_ops,
28712 .set_piomode = sis_old_set_piomode,
28713 .set_dmamode = sis_66_set_dmamode,
28714 .cable_detect = sis_66_cable_detect,
28715 };
28716
28717-static struct ata_port_operations sis_old_ops = {
28718+static const struct ata_port_operations sis_old_ops = {
28719 .inherits = &sis_base_ops,
28720 .set_piomode = sis_old_set_piomode,
28721 .set_dmamode = sis_old_set_dmamode,
28722diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
28723index 29f733c..43e9ca0 100644
28724--- a/drivers/ata/pata_sl82c105.c
28725+++ b/drivers/ata/pata_sl82c105.c
28726@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
28727 ATA_BMDMA_SHT(DRV_NAME),
28728 };
28729
28730-static struct ata_port_operations sl82c105_port_ops = {
28731+static const struct ata_port_operations sl82c105_port_ops = {
28732 .inherits = &ata_bmdma_port_ops,
28733 .qc_defer = sl82c105_qc_defer,
28734 .bmdma_start = sl82c105_bmdma_start,
28735diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
28736index f1f13ff..df39e99 100644
28737--- a/drivers/ata/pata_triflex.c
28738+++ b/drivers/ata/pata_triflex.c
28739@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
28740 ATA_BMDMA_SHT(DRV_NAME),
28741 };
28742
28743-static struct ata_port_operations triflex_port_ops = {
28744+static const struct ata_port_operations triflex_port_ops = {
28745 .inherits = &ata_bmdma_port_ops,
28746 .bmdma_start = triflex_bmdma_start,
28747 .bmdma_stop = triflex_bmdma_stop,
28748diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
28749index 1d73b8d..98a4b29 100644
28750--- a/drivers/ata/pata_via.c
28751+++ b/drivers/ata/pata_via.c
28752@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
28753 ATA_BMDMA_SHT(DRV_NAME),
28754 };
28755
28756-static struct ata_port_operations via_port_ops = {
28757+static const struct ata_port_operations via_port_ops = {
28758 .inherits = &ata_bmdma_port_ops,
28759 .cable_detect = via_cable_detect,
28760 .set_piomode = via_set_piomode,
28761@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
28762 .port_start = via_port_start,
28763 };
28764
28765-static struct ata_port_operations via_port_ops_noirq = {
28766+static const struct ata_port_operations via_port_ops_noirq = {
28767 .inherits = &via_port_ops,
28768 .sff_data_xfer = ata_sff_data_xfer_noirq,
28769 };
28770diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
28771index 6d8619b..ad511c4 100644
28772--- a/drivers/ata/pata_winbond.c
28773+++ b/drivers/ata/pata_winbond.c
28774@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
28775 ATA_PIO_SHT(DRV_NAME),
28776 };
28777
28778-static struct ata_port_operations winbond_port_ops = {
28779+static const struct ata_port_operations winbond_port_ops = {
28780 .inherits = &ata_sff_port_ops,
28781 .sff_data_xfer = winbond_data_xfer,
28782 .cable_detect = ata_cable_40wire,
28783diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
28784index 6c65b07..f996ec7 100644
28785--- a/drivers/ata/pdc_adma.c
28786+++ b/drivers/ata/pdc_adma.c
28787@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
28788 .dma_boundary = ADMA_DMA_BOUNDARY,
28789 };
28790
28791-static struct ata_port_operations adma_ata_ops = {
28792+static const struct ata_port_operations adma_ata_ops = {
28793 .inherits = &ata_sff_port_ops,
28794
28795 .lost_interrupt = ATA_OP_NULL,
28796diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
28797index 172b57e..c49bc1e 100644
28798--- a/drivers/ata/sata_fsl.c
28799+++ b/drivers/ata/sata_fsl.c
28800@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
28801 .dma_boundary = ATA_DMA_BOUNDARY,
28802 };
28803
28804-static struct ata_port_operations sata_fsl_ops = {
28805+static const struct ata_port_operations sata_fsl_ops = {
28806 .inherits = &sata_pmp_port_ops,
28807
28808 .qc_defer = ata_std_qc_defer,
28809diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
28810index 4406902..60603ef 100644
28811--- a/drivers/ata/sata_inic162x.c
28812+++ b/drivers/ata/sata_inic162x.c
28813@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
28814 return 0;
28815 }
28816
28817-static struct ata_port_operations inic_port_ops = {
28818+static const struct ata_port_operations inic_port_ops = {
28819 .inherits = &sata_port_ops,
28820
28821 .check_atapi_dma = inic_check_atapi_dma,
28822diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
28823index cf41126..8107be6 100644
28824--- a/drivers/ata/sata_mv.c
28825+++ b/drivers/ata/sata_mv.c
28826@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
28827 .dma_boundary = MV_DMA_BOUNDARY,
28828 };
28829
28830-static struct ata_port_operations mv5_ops = {
28831+static const struct ata_port_operations mv5_ops = {
28832 .inherits = &ata_sff_port_ops,
28833
28834 .lost_interrupt = ATA_OP_NULL,
28835@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
28836 .port_stop = mv_port_stop,
28837 };
28838
28839-static struct ata_port_operations mv6_ops = {
28840+static const struct ata_port_operations mv6_ops = {
28841 .inherits = &mv5_ops,
28842 .dev_config = mv6_dev_config,
28843 .scr_read = mv_scr_read,
28844@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
28845 .bmdma_status = mv_bmdma_status,
28846 };
28847
28848-static struct ata_port_operations mv_iie_ops = {
28849+static const struct ata_port_operations mv_iie_ops = {
28850 .inherits = &mv6_ops,
28851 .dev_config = ATA_OP_NULL,
28852 .qc_prep = mv_qc_prep_iie,
28853diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
28854index ae2297c..d5c9c33 100644
28855--- a/drivers/ata/sata_nv.c
28856+++ b/drivers/ata/sata_nv.c
28857@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
28858 * cases. Define nv_hardreset() which only kicks in for post-boot
28859 * probing and use it for all variants.
28860 */
28861-static struct ata_port_operations nv_generic_ops = {
28862+static const struct ata_port_operations nv_generic_ops = {
28863 .inherits = &ata_bmdma_port_ops,
28864 .lost_interrupt = ATA_OP_NULL,
28865 .scr_read = nv_scr_read,
28866@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
28867 .hardreset = nv_hardreset,
28868 };
28869
28870-static struct ata_port_operations nv_nf2_ops = {
28871+static const struct ata_port_operations nv_nf2_ops = {
28872 .inherits = &nv_generic_ops,
28873 .freeze = nv_nf2_freeze,
28874 .thaw = nv_nf2_thaw,
28875 };
28876
28877-static struct ata_port_operations nv_ck804_ops = {
28878+static const struct ata_port_operations nv_ck804_ops = {
28879 .inherits = &nv_generic_ops,
28880 .freeze = nv_ck804_freeze,
28881 .thaw = nv_ck804_thaw,
28882 .host_stop = nv_ck804_host_stop,
28883 };
28884
28885-static struct ata_port_operations nv_adma_ops = {
28886+static const struct ata_port_operations nv_adma_ops = {
28887 .inherits = &nv_ck804_ops,
28888
28889 .check_atapi_dma = nv_adma_check_atapi_dma,
28890@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
28891 .host_stop = nv_adma_host_stop,
28892 };
28893
28894-static struct ata_port_operations nv_swncq_ops = {
28895+static const struct ata_port_operations nv_swncq_ops = {
28896 .inherits = &nv_generic_ops,
28897
28898 .qc_defer = ata_std_qc_defer,
28899diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
28900index 07d8d00..6cc70bb 100644
28901--- a/drivers/ata/sata_promise.c
28902+++ b/drivers/ata/sata_promise.c
28903@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
28904 .error_handler = pdc_error_handler,
28905 };
28906
28907-static struct ata_port_operations pdc_sata_ops = {
28908+static const struct ata_port_operations pdc_sata_ops = {
28909 .inherits = &pdc_common_ops,
28910 .cable_detect = pdc_sata_cable_detect,
28911 .freeze = pdc_sata_freeze,
28912@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
28913
28914 /* First-generation chips need a more restrictive ->check_atapi_dma op,
28915 and ->freeze/thaw that ignore the hotplug controls. */
28916-static struct ata_port_operations pdc_old_sata_ops = {
28917+static const struct ata_port_operations pdc_old_sata_ops = {
28918 .inherits = &pdc_sata_ops,
28919 .freeze = pdc_freeze,
28920 .thaw = pdc_thaw,
28921 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
28922 };
28923
28924-static struct ata_port_operations pdc_pata_ops = {
28925+static const struct ata_port_operations pdc_pata_ops = {
28926 .inherits = &pdc_common_ops,
28927 .cable_detect = pdc_pata_cable_detect,
28928 .freeze = pdc_freeze,
28929diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
28930index 326c0cf..36ecebe 100644
28931--- a/drivers/ata/sata_qstor.c
28932+++ b/drivers/ata/sata_qstor.c
28933@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
28934 .dma_boundary = QS_DMA_BOUNDARY,
28935 };
28936
28937-static struct ata_port_operations qs_ata_ops = {
28938+static const struct ata_port_operations qs_ata_ops = {
28939 .inherits = &ata_sff_port_ops,
28940
28941 .check_atapi_dma = qs_check_atapi_dma,
28942diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
28943index 3cb69d5..0871d3c 100644
28944--- a/drivers/ata/sata_sil.c
28945+++ b/drivers/ata/sata_sil.c
28946@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
28947 .sg_tablesize = ATA_MAX_PRD
28948 };
28949
28950-static struct ata_port_operations sil_ops = {
28951+static const struct ata_port_operations sil_ops = {
28952 .inherits = &ata_bmdma32_port_ops,
28953 .dev_config = sil_dev_config,
28954 .set_mode = sil_set_mode,
28955diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
28956index e6946fc..eddb794 100644
28957--- a/drivers/ata/sata_sil24.c
28958+++ b/drivers/ata/sata_sil24.c
28959@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
28960 .dma_boundary = ATA_DMA_BOUNDARY,
28961 };
28962
28963-static struct ata_port_operations sil24_ops = {
28964+static const struct ata_port_operations sil24_ops = {
28965 .inherits = &sata_pmp_port_ops,
28966
28967 .qc_defer = sil24_qc_defer,
28968diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
28969index f8a91bf..9cb06b6 100644
28970--- a/drivers/ata/sata_sis.c
28971+++ b/drivers/ata/sata_sis.c
28972@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
28973 ATA_BMDMA_SHT(DRV_NAME),
28974 };
28975
28976-static struct ata_port_operations sis_ops = {
28977+static const struct ata_port_operations sis_ops = {
28978 .inherits = &ata_bmdma_port_ops,
28979 .scr_read = sis_scr_read,
28980 .scr_write = sis_scr_write,
28981diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
28982index 7257f2d..d04c6f5 100644
28983--- a/drivers/ata/sata_svw.c
28984+++ b/drivers/ata/sata_svw.c
28985@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
28986 };
28987
28988
28989-static struct ata_port_operations k2_sata_ops = {
28990+static const struct ata_port_operations k2_sata_ops = {
28991 .inherits = &ata_bmdma_port_ops,
28992 .sff_tf_load = k2_sata_tf_load,
28993 .sff_tf_read = k2_sata_tf_read,
28994diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
28995index bbcf970..cd0df0d 100644
28996--- a/drivers/ata/sata_sx4.c
28997+++ b/drivers/ata/sata_sx4.c
28998@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
28999 };
29000
29001 /* TODO: inherit from base port_ops after converting to new EH */
29002-static struct ata_port_operations pdc_20621_ops = {
29003+static const struct ata_port_operations pdc_20621_ops = {
29004 .inherits = &ata_sff_port_ops,
29005
29006 .check_atapi_dma = pdc_check_atapi_dma,
29007diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29008index e5bff47..089d859 100644
29009--- a/drivers/ata/sata_uli.c
29010+++ b/drivers/ata/sata_uli.c
29011@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29012 ATA_BMDMA_SHT(DRV_NAME),
29013 };
29014
29015-static struct ata_port_operations uli_ops = {
29016+static const struct ata_port_operations uli_ops = {
29017 .inherits = &ata_bmdma_port_ops,
29018 .scr_read = uli_scr_read,
29019 .scr_write = uli_scr_write,
29020diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29021index f5dcca7..77b94eb 100644
29022--- a/drivers/ata/sata_via.c
29023+++ b/drivers/ata/sata_via.c
29024@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29025 ATA_BMDMA_SHT(DRV_NAME),
29026 };
29027
29028-static struct ata_port_operations svia_base_ops = {
29029+static const struct ata_port_operations svia_base_ops = {
29030 .inherits = &ata_bmdma_port_ops,
29031 .sff_tf_load = svia_tf_load,
29032 };
29033
29034-static struct ata_port_operations vt6420_sata_ops = {
29035+static const struct ata_port_operations vt6420_sata_ops = {
29036 .inherits = &svia_base_ops,
29037 .freeze = svia_noop_freeze,
29038 .prereset = vt6420_prereset,
29039 .bmdma_start = vt6420_bmdma_start,
29040 };
29041
29042-static struct ata_port_operations vt6421_pata_ops = {
29043+static const struct ata_port_operations vt6421_pata_ops = {
29044 .inherits = &svia_base_ops,
29045 .cable_detect = vt6421_pata_cable_detect,
29046 .set_piomode = vt6421_set_pio_mode,
29047 .set_dmamode = vt6421_set_dma_mode,
29048 };
29049
29050-static struct ata_port_operations vt6421_sata_ops = {
29051+static const struct ata_port_operations vt6421_sata_ops = {
29052 .inherits = &svia_base_ops,
29053 .scr_read = svia_scr_read,
29054 .scr_write = svia_scr_write,
29055 };
29056
29057-static struct ata_port_operations vt8251_ops = {
29058+static const struct ata_port_operations vt8251_ops = {
29059 .inherits = &svia_base_ops,
29060 .hardreset = sata_std_hardreset,
29061 .scr_read = vt8251_scr_read,
29062diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29063index 8b2a278..51e65d3 100644
29064--- a/drivers/ata/sata_vsc.c
29065+++ b/drivers/ata/sata_vsc.c
29066@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29067 };
29068
29069
29070-static struct ata_port_operations vsc_sata_ops = {
29071+static const struct ata_port_operations vsc_sata_ops = {
29072 .inherits = &ata_bmdma_port_ops,
29073 /* The IRQ handling is not quite standard SFF behaviour so we
29074 cannot use the default lost interrupt handler */
29075diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29076index 5effec6..7e4019a 100644
29077--- a/drivers/atm/adummy.c
29078+++ b/drivers/atm/adummy.c
29079@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29080 vcc->pop(vcc, skb);
29081 else
29082 dev_kfree_skb_any(skb);
29083- atomic_inc(&vcc->stats->tx);
29084+ atomic_inc_unchecked(&vcc->stats->tx);
29085
29086 return 0;
29087 }
29088diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29089index 66e1813..26a27c6 100644
29090--- a/drivers/atm/ambassador.c
29091+++ b/drivers/atm/ambassador.c
29092@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29093 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29094
29095 // VC layer stats
29096- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29097+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29098
29099 // free the descriptor
29100 kfree (tx_descr);
29101@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29102 dump_skb ("<<<", vc, skb);
29103
29104 // VC layer stats
29105- atomic_inc(&atm_vcc->stats->rx);
29106+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29107 __net_timestamp(skb);
29108 // end of our responsability
29109 atm_vcc->push (atm_vcc, skb);
29110@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29111 } else {
29112 PRINTK (KERN_INFO, "dropped over-size frame");
29113 // should we count this?
29114- atomic_inc(&atm_vcc->stats->rx_drop);
29115+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29116 }
29117
29118 } else {
29119@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29120 }
29121
29122 if (check_area (skb->data, skb->len)) {
29123- atomic_inc(&atm_vcc->stats->tx_err);
29124+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29125 return -ENOMEM; // ?
29126 }
29127
29128diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29129index 02ad83d..6daffeb 100644
29130--- a/drivers/atm/atmtcp.c
29131+++ b/drivers/atm/atmtcp.c
29132@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29133 if (vcc->pop) vcc->pop(vcc,skb);
29134 else dev_kfree_skb(skb);
29135 if (dev_data) return 0;
29136- atomic_inc(&vcc->stats->tx_err);
29137+ atomic_inc_unchecked(&vcc->stats->tx_err);
29138 return -ENOLINK;
29139 }
29140 size = skb->len+sizeof(struct atmtcp_hdr);
29141@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29142 if (!new_skb) {
29143 if (vcc->pop) vcc->pop(vcc,skb);
29144 else dev_kfree_skb(skb);
29145- atomic_inc(&vcc->stats->tx_err);
29146+ atomic_inc_unchecked(&vcc->stats->tx_err);
29147 return -ENOBUFS;
29148 }
29149 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29150@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29151 if (vcc->pop) vcc->pop(vcc,skb);
29152 else dev_kfree_skb(skb);
29153 out_vcc->push(out_vcc,new_skb);
29154- atomic_inc(&vcc->stats->tx);
29155- atomic_inc(&out_vcc->stats->rx);
29156+ atomic_inc_unchecked(&vcc->stats->tx);
29157+ atomic_inc_unchecked(&out_vcc->stats->rx);
29158 return 0;
29159 }
29160
29161@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29162 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29163 read_unlock(&vcc_sklist_lock);
29164 if (!out_vcc) {
29165- atomic_inc(&vcc->stats->tx_err);
29166+ atomic_inc_unchecked(&vcc->stats->tx_err);
29167 goto done;
29168 }
29169 skb_pull(skb,sizeof(struct atmtcp_hdr));
29170@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29171 __net_timestamp(new_skb);
29172 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29173 out_vcc->push(out_vcc,new_skb);
29174- atomic_inc(&vcc->stats->tx);
29175- atomic_inc(&out_vcc->stats->rx);
29176+ atomic_inc_unchecked(&vcc->stats->tx);
29177+ atomic_inc_unchecked(&out_vcc->stats->rx);
29178 done:
29179 if (vcc->pop) vcc->pop(vcc,skb);
29180 else dev_kfree_skb(skb);
29181diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29182index 0c30261..3da356e 100644
29183--- a/drivers/atm/eni.c
29184+++ b/drivers/atm/eni.c
29185@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29186 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29187 vcc->dev->number);
29188 length = 0;
29189- atomic_inc(&vcc->stats->rx_err);
29190+ atomic_inc_unchecked(&vcc->stats->rx_err);
29191 }
29192 else {
29193 length = ATM_CELL_SIZE-1; /* no HEC */
29194@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29195 size);
29196 }
29197 eff = length = 0;
29198- atomic_inc(&vcc->stats->rx_err);
29199+ atomic_inc_unchecked(&vcc->stats->rx_err);
29200 }
29201 else {
29202 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29203@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29204 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29205 vcc->dev->number,vcc->vci,length,size << 2,descr);
29206 length = eff = 0;
29207- atomic_inc(&vcc->stats->rx_err);
29208+ atomic_inc_unchecked(&vcc->stats->rx_err);
29209 }
29210 }
29211 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29212@@ -770,7 +770,7 @@ rx_dequeued++;
29213 vcc->push(vcc,skb);
29214 pushed++;
29215 }
29216- atomic_inc(&vcc->stats->rx);
29217+ atomic_inc_unchecked(&vcc->stats->rx);
29218 }
29219 wake_up(&eni_dev->rx_wait);
29220 }
29221@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29222 PCI_DMA_TODEVICE);
29223 if (vcc->pop) vcc->pop(vcc,skb);
29224 else dev_kfree_skb_irq(skb);
29225- atomic_inc(&vcc->stats->tx);
29226+ atomic_inc_unchecked(&vcc->stats->tx);
29227 wake_up(&eni_dev->tx_wait);
29228 dma_complete++;
29229 }
29230@@ -1570,7 +1570,7 @@ tx_complete++;
29231 /*--------------------------------- entries ---------------------------------*/
29232
29233
29234-static const char *media_name[] __devinitdata = {
29235+static const char *media_name[] __devinitconst = {
29236 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29237 "UTP", "05?", "06?", "07?", /* 4- 7 */
29238 "TAXI","09?", "10?", "11?", /* 8-11 */
29239diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29240index cd5049a..a51209f 100644
29241--- a/drivers/atm/firestream.c
29242+++ b/drivers/atm/firestream.c
29243@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29244 }
29245 }
29246
29247- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29248+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29249
29250 fs_dprintk (FS_DEBUG_TXMEM, "i");
29251 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29252@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29253 #endif
29254 skb_put (skb, qe->p1 & 0xffff);
29255 ATM_SKB(skb)->vcc = atm_vcc;
29256- atomic_inc(&atm_vcc->stats->rx);
29257+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29258 __net_timestamp(skb);
29259 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29260 atm_vcc->push (atm_vcc, skb);
29261@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29262 kfree (pe);
29263 }
29264 if (atm_vcc)
29265- atomic_inc(&atm_vcc->stats->rx_drop);
29266+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29267 break;
29268 case 0x1f: /* Reassembly abort: no buffers. */
29269 /* Silently increment error counter. */
29270 if (atm_vcc)
29271- atomic_inc(&atm_vcc->stats->rx_drop);
29272+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29273 break;
29274 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29275 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29276diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29277index f766cc4..a34002e 100644
29278--- a/drivers/atm/fore200e.c
29279+++ b/drivers/atm/fore200e.c
29280@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29281 #endif
29282 /* check error condition */
29283 if (*entry->status & STATUS_ERROR)
29284- atomic_inc(&vcc->stats->tx_err);
29285+ atomic_inc_unchecked(&vcc->stats->tx_err);
29286 else
29287- atomic_inc(&vcc->stats->tx);
29288+ atomic_inc_unchecked(&vcc->stats->tx);
29289 }
29290 }
29291
29292@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29293 if (skb == NULL) {
29294 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29295
29296- atomic_inc(&vcc->stats->rx_drop);
29297+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29298 return -ENOMEM;
29299 }
29300
29301@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29302
29303 dev_kfree_skb_any(skb);
29304
29305- atomic_inc(&vcc->stats->rx_drop);
29306+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29307 return -ENOMEM;
29308 }
29309
29310 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29311
29312 vcc->push(vcc, skb);
29313- atomic_inc(&vcc->stats->rx);
29314+ atomic_inc_unchecked(&vcc->stats->rx);
29315
29316 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29317
29318@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29319 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29320 fore200e->atm_dev->number,
29321 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29322- atomic_inc(&vcc->stats->rx_err);
29323+ atomic_inc_unchecked(&vcc->stats->rx_err);
29324 }
29325 }
29326
29327@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29328 goto retry_here;
29329 }
29330
29331- atomic_inc(&vcc->stats->tx_err);
29332+ atomic_inc_unchecked(&vcc->stats->tx_err);
29333
29334 fore200e->tx_sat++;
29335 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29336diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29337index 7066703..2b130de 100644
29338--- a/drivers/atm/he.c
29339+++ b/drivers/atm/he.c
29340@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29341
29342 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29343 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29344- atomic_inc(&vcc->stats->rx_drop);
29345+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29346 goto return_host_buffers;
29347 }
29348
29349@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29350 RBRQ_LEN_ERR(he_dev->rbrq_head)
29351 ? "LEN_ERR" : "",
29352 vcc->vpi, vcc->vci);
29353- atomic_inc(&vcc->stats->rx_err);
29354+ atomic_inc_unchecked(&vcc->stats->rx_err);
29355 goto return_host_buffers;
29356 }
29357
29358@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29359 vcc->push(vcc, skb);
29360 spin_lock(&he_dev->global_lock);
29361
29362- atomic_inc(&vcc->stats->rx);
29363+ atomic_inc_unchecked(&vcc->stats->rx);
29364
29365 return_host_buffers:
29366 ++pdus_assembled;
29367@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29368 tpd->vcc->pop(tpd->vcc, tpd->skb);
29369 else
29370 dev_kfree_skb_any(tpd->skb);
29371- atomic_inc(&tpd->vcc->stats->tx_err);
29372+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29373 }
29374 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29375 return;
29376@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29377 vcc->pop(vcc, skb);
29378 else
29379 dev_kfree_skb_any(skb);
29380- atomic_inc(&vcc->stats->tx_err);
29381+ atomic_inc_unchecked(&vcc->stats->tx_err);
29382 return -EINVAL;
29383 }
29384
29385@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29386 vcc->pop(vcc, skb);
29387 else
29388 dev_kfree_skb_any(skb);
29389- atomic_inc(&vcc->stats->tx_err);
29390+ atomic_inc_unchecked(&vcc->stats->tx_err);
29391 return -EINVAL;
29392 }
29393 #endif
29394@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29395 vcc->pop(vcc, skb);
29396 else
29397 dev_kfree_skb_any(skb);
29398- atomic_inc(&vcc->stats->tx_err);
29399+ atomic_inc_unchecked(&vcc->stats->tx_err);
29400 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29401 return -ENOMEM;
29402 }
29403@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29404 vcc->pop(vcc, skb);
29405 else
29406 dev_kfree_skb_any(skb);
29407- atomic_inc(&vcc->stats->tx_err);
29408+ atomic_inc_unchecked(&vcc->stats->tx_err);
29409 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29410 return -ENOMEM;
29411 }
29412@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29413 __enqueue_tpd(he_dev, tpd, cid);
29414 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29415
29416- atomic_inc(&vcc->stats->tx);
29417+ atomic_inc_unchecked(&vcc->stats->tx);
29418
29419 return 0;
29420 }
29421diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29422index 4e49021..01b1512 100644
29423--- a/drivers/atm/horizon.c
29424+++ b/drivers/atm/horizon.c
29425@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29426 {
29427 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29428 // VC layer stats
29429- atomic_inc(&vcc->stats->rx);
29430+ atomic_inc_unchecked(&vcc->stats->rx);
29431 __net_timestamp(skb);
29432 // end of our responsability
29433 vcc->push (vcc, skb);
29434@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29435 dev->tx_iovec = NULL;
29436
29437 // VC layer stats
29438- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29439+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29440
29441 // free the skb
29442 hrz_kfree_skb (skb);
29443diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29444index e33ae00..9deb4ab 100644
29445--- a/drivers/atm/idt77252.c
29446+++ b/drivers/atm/idt77252.c
29447@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29448 else
29449 dev_kfree_skb(skb);
29450
29451- atomic_inc(&vcc->stats->tx);
29452+ atomic_inc_unchecked(&vcc->stats->tx);
29453 }
29454
29455 atomic_dec(&scq->used);
29456@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29457 if ((sb = dev_alloc_skb(64)) == NULL) {
29458 printk("%s: Can't allocate buffers for aal0.\n",
29459 card->name);
29460- atomic_add(i, &vcc->stats->rx_drop);
29461+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
29462 break;
29463 }
29464 if (!atm_charge(vcc, sb->truesize)) {
29465 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29466 card->name);
29467- atomic_add(i - 1, &vcc->stats->rx_drop);
29468+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29469 dev_kfree_skb(sb);
29470 break;
29471 }
29472@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29473 ATM_SKB(sb)->vcc = vcc;
29474 __net_timestamp(sb);
29475 vcc->push(vcc, sb);
29476- atomic_inc(&vcc->stats->rx);
29477+ atomic_inc_unchecked(&vcc->stats->rx);
29478
29479 cell += ATM_CELL_PAYLOAD;
29480 }
29481@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29482 "(CDC: %08x)\n",
29483 card->name, len, rpp->len, readl(SAR_REG_CDC));
29484 recycle_rx_pool_skb(card, rpp);
29485- atomic_inc(&vcc->stats->rx_err);
29486+ atomic_inc_unchecked(&vcc->stats->rx_err);
29487 return;
29488 }
29489 if (stat & SAR_RSQE_CRC) {
29490 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29491 recycle_rx_pool_skb(card, rpp);
29492- atomic_inc(&vcc->stats->rx_err);
29493+ atomic_inc_unchecked(&vcc->stats->rx_err);
29494 return;
29495 }
29496 if (skb_queue_len(&rpp->queue) > 1) {
29497@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29498 RXPRINTK("%s: Can't alloc RX skb.\n",
29499 card->name);
29500 recycle_rx_pool_skb(card, rpp);
29501- atomic_inc(&vcc->stats->rx_err);
29502+ atomic_inc_unchecked(&vcc->stats->rx_err);
29503 return;
29504 }
29505 if (!atm_charge(vcc, skb->truesize)) {
29506@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29507 __net_timestamp(skb);
29508
29509 vcc->push(vcc, skb);
29510- atomic_inc(&vcc->stats->rx);
29511+ atomic_inc_unchecked(&vcc->stats->rx);
29512
29513 return;
29514 }
29515@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29516 __net_timestamp(skb);
29517
29518 vcc->push(vcc, skb);
29519- atomic_inc(&vcc->stats->rx);
29520+ atomic_inc_unchecked(&vcc->stats->rx);
29521
29522 if (skb->truesize > SAR_FB_SIZE_3)
29523 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29524@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29525 if (vcc->qos.aal != ATM_AAL0) {
29526 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29527 card->name, vpi, vci);
29528- atomic_inc(&vcc->stats->rx_drop);
29529+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29530 goto drop;
29531 }
29532
29533 if ((sb = dev_alloc_skb(64)) == NULL) {
29534 printk("%s: Can't allocate buffers for AAL0.\n",
29535 card->name);
29536- atomic_inc(&vcc->stats->rx_err);
29537+ atomic_inc_unchecked(&vcc->stats->rx_err);
29538 goto drop;
29539 }
29540
29541@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29542 ATM_SKB(sb)->vcc = vcc;
29543 __net_timestamp(sb);
29544 vcc->push(vcc, sb);
29545- atomic_inc(&vcc->stats->rx);
29546+ atomic_inc_unchecked(&vcc->stats->rx);
29547
29548 drop:
29549 skb_pull(queue, 64);
29550@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29551
29552 if (vc == NULL) {
29553 printk("%s: NULL connection in send().\n", card->name);
29554- atomic_inc(&vcc->stats->tx_err);
29555+ atomic_inc_unchecked(&vcc->stats->tx_err);
29556 dev_kfree_skb(skb);
29557 return -EINVAL;
29558 }
29559 if (!test_bit(VCF_TX, &vc->flags)) {
29560 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29561- atomic_inc(&vcc->stats->tx_err);
29562+ atomic_inc_unchecked(&vcc->stats->tx_err);
29563 dev_kfree_skb(skb);
29564 return -EINVAL;
29565 }
29566@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29567 break;
29568 default:
29569 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29570- atomic_inc(&vcc->stats->tx_err);
29571+ atomic_inc_unchecked(&vcc->stats->tx_err);
29572 dev_kfree_skb(skb);
29573 return -EINVAL;
29574 }
29575
29576 if (skb_shinfo(skb)->nr_frags != 0) {
29577 printk("%s: No scatter-gather yet.\n", card->name);
29578- atomic_inc(&vcc->stats->tx_err);
29579+ atomic_inc_unchecked(&vcc->stats->tx_err);
29580 dev_kfree_skb(skb);
29581 return -EINVAL;
29582 }
29583@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29584
29585 err = queue_skb(card, vc, skb, oam);
29586 if (err) {
29587- atomic_inc(&vcc->stats->tx_err);
29588+ atomic_inc_unchecked(&vcc->stats->tx_err);
29589 dev_kfree_skb(skb);
29590 return err;
29591 }
29592@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29593 skb = dev_alloc_skb(64);
29594 if (!skb) {
29595 printk("%s: Out of memory in send_oam().\n", card->name);
29596- atomic_inc(&vcc->stats->tx_err);
29597+ atomic_inc_unchecked(&vcc->stats->tx_err);
29598 return -ENOMEM;
29599 }
29600 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29601diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29602index b2c1b37..faa672b 100644
29603--- a/drivers/atm/iphase.c
29604+++ b/drivers/atm/iphase.c
29605@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29606 status = (u_short) (buf_desc_ptr->desc_mode);
29607 if (status & (RX_CER | RX_PTE | RX_OFL))
29608 {
29609- atomic_inc(&vcc->stats->rx_err);
29610+ atomic_inc_unchecked(&vcc->stats->rx_err);
29611 IF_ERR(printk("IA: bad packet, dropping it");)
29612 if (status & RX_CER) {
29613 IF_ERR(printk(" cause: packet CRC error\n");)
29614@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29615 len = dma_addr - buf_addr;
29616 if (len > iadev->rx_buf_sz) {
29617 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29618- atomic_inc(&vcc->stats->rx_err);
29619+ atomic_inc_unchecked(&vcc->stats->rx_err);
29620 goto out_free_desc;
29621 }
29622
29623@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29624 ia_vcc = INPH_IA_VCC(vcc);
29625 if (ia_vcc == NULL)
29626 {
29627- atomic_inc(&vcc->stats->rx_err);
29628+ atomic_inc_unchecked(&vcc->stats->rx_err);
29629 dev_kfree_skb_any(skb);
29630 atm_return(vcc, atm_guess_pdu2truesize(len));
29631 goto INCR_DLE;
29632@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29633 if ((length > iadev->rx_buf_sz) || (length >
29634 (skb->len - sizeof(struct cpcs_trailer))))
29635 {
29636- atomic_inc(&vcc->stats->rx_err);
29637+ atomic_inc_unchecked(&vcc->stats->rx_err);
29638 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29639 length, skb->len);)
29640 dev_kfree_skb_any(skb);
29641@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29642
29643 IF_RX(printk("rx_dle_intr: skb push");)
29644 vcc->push(vcc,skb);
29645- atomic_inc(&vcc->stats->rx);
29646+ atomic_inc_unchecked(&vcc->stats->rx);
29647 iadev->rx_pkt_cnt++;
29648 }
29649 INCR_DLE:
29650@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29651 {
29652 struct k_sonet_stats *stats;
29653 stats = &PRIV(_ia_dev[board])->sonet_stats;
29654- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29655- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29656- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29657- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29658- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29659- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29660- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29661- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29662- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29663+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29664+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29665+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29666+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29667+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29668+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29669+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29670+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29671+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29672 }
29673 ia_cmds.status = 0;
29674 break;
29675@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29676 if ((desc == 0) || (desc > iadev->num_tx_desc))
29677 {
29678 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29679- atomic_inc(&vcc->stats->tx);
29680+ atomic_inc_unchecked(&vcc->stats->tx);
29681 if (vcc->pop)
29682 vcc->pop(vcc, skb);
29683 else
29684@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29685 ATM_DESC(skb) = vcc->vci;
29686 skb_queue_tail(&iadev->tx_dma_q, skb);
29687
29688- atomic_inc(&vcc->stats->tx);
29689+ atomic_inc_unchecked(&vcc->stats->tx);
29690 iadev->tx_pkt_cnt++;
29691 /* Increment transaction counter */
29692 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29693
29694 #if 0
29695 /* add flow control logic */
29696- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29697+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29698 if (iavcc->vc_desc_cnt > 10) {
29699 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29700 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29701diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29702index cf97c34..8d30655 100644
29703--- a/drivers/atm/lanai.c
29704+++ b/drivers/atm/lanai.c
29705@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29706 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29707 lanai_endtx(lanai, lvcc);
29708 lanai_free_skb(lvcc->tx.atmvcc, skb);
29709- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29710+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29711 }
29712
29713 /* Try to fill the buffer - don't call unless there is backlog */
29714@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29715 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29716 __net_timestamp(skb);
29717 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29718- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29719+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29720 out:
29721 lvcc->rx.buf.ptr = end;
29722 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29723@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29724 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29725 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29726 lanai->stats.service_rxnotaal5++;
29727- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29728+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29729 return 0;
29730 }
29731 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29732@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29733 int bytes;
29734 read_unlock(&vcc_sklist_lock);
29735 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29736- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29737+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29738 lvcc->stats.x.aal5.service_trash++;
29739 bytes = (SERVICE_GET_END(s) * 16) -
29740 (((unsigned long) lvcc->rx.buf.ptr) -
29741@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29742 }
29743 if (s & SERVICE_STREAM) {
29744 read_unlock(&vcc_sklist_lock);
29745- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29746+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29747 lvcc->stats.x.aal5.service_stream++;
29748 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29749 "PDU on VCI %d!\n", lanai->number, vci);
29750@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29751 return 0;
29752 }
29753 DPRINTK("got rx crc error on vci %d\n", vci);
29754- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29755+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29756 lvcc->stats.x.aal5.service_rxcrc++;
29757 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
29758 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
29759diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
29760index 3da804b..d3b0eed 100644
29761--- a/drivers/atm/nicstar.c
29762+++ b/drivers/atm/nicstar.c
29763@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29764 if ((vc = (vc_map *) vcc->dev_data) == NULL)
29765 {
29766 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
29767- atomic_inc(&vcc->stats->tx_err);
29768+ atomic_inc_unchecked(&vcc->stats->tx_err);
29769 dev_kfree_skb_any(skb);
29770 return -EINVAL;
29771 }
29772@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29773 if (!vc->tx)
29774 {
29775 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
29776- atomic_inc(&vcc->stats->tx_err);
29777+ atomic_inc_unchecked(&vcc->stats->tx_err);
29778 dev_kfree_skb_any(skb);
29779 return -EINVAL;
29780 }
29781@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29782 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
29783 {
29784 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
29785- atomic_inc(&vcc->stats->tx_err);
29786+ atomic_inc_unchecked(&vcc->stats->tx_err);
29787 dev_kfree_skb_any(skb);
29788 return -EINVAL;
29789 }
29790@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29791 if (skb_shinfo(skb)->nr_frags != 0)
29792 {
29793 printk("nicstar%d: No scatter-gather yet.\n", card->index);
29794- atomic_inc(&vcc->stats->tx_err);
29795+ atomic_inc_unchecked(&vcc->stats->tx_err);
29796 dev_kfree_skb_any(skb);
29797 return -EINVAL;
29798 }
29799@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29800
29801 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
29802 {
29803- atomic_inc(&vcc->stats->tx_err);
29804+ atomic_inc_unchecked(&vcc->stats->tx_err);
29805 dev_kfree_skb_any(skb);
29806 return -EIO;
29807 }
29808- atomic_inc(&vcc->stats->tx);
29809+ atomic_inc_unchecked(&vcc->stats->tx);
29810
29811 return 0;
29812 }
29813@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29814 {
29815 printk("nicstar%d: Can't allocate buffers for aal0.\n",
29816 card->index);
29817- atomic_add(i,&vcc->stats->rx_drop);
29818+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
29819 break;
29820 }
29821 if (!atm_charge(vcc, sb->truesize))
29822 {
29823 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
29824 card->index);
29825- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
29826+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
29827 dev_kfree_skb_any(sb);
29828 break;
29829 }
29830@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29831 ATM_SKB(sb)->vcc = vcc;
29832 __net_timestamp(sb);
29833 vcc->push(vcc, sb);
29834- atomic_inc(&vcc->stats->rx);
29835+ atomic_inc_unchecked(&vcc->stats->rx);
29836 cell += ATM_CELL_PAYLOAD;
29837 }
29838
29839@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29840 if (iovb == NULL)
29841 {
29842 printk("nicstar%d: Out of iovec buffers.\n", card->index);
29843- atomic_inc(&vcc->stats->rx_drop);
29844+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29845 recycle_rx_buf(card, skb);
29846 return;
29847 }
29848@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29849 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
29850 {
29851 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
29852- atomic_inc(&vcc->stats->rx_err);
29853+ atomic_inc_unchecked(&vcc->stats->rx_err);
29854 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
29855 NS_SKB(iovb)->iovcnt = 0;
29856 iovb->len = 0;
29857@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29858 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
29859 card->index);
29860 which_list(card, skb);
29861- atomic_inc(&vcc->stats->rx_err);
29862+ atomic_inc_unchecked(&vcc->stats->rx_err);
29863 recycle_rx_buf(card, skb);
29864 vc->rx_iov = NULL;
29865 recycle_iov_buf(card, iovb);
29866@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29867 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
29868 card->index);
29869 which_list(card, skb);
29870- atomic_inc(&vcc->stats->rx_err);
29871+ atomic_inc_unchecked(&vcc->stats->rx_err);
29872 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29873 NS_SKB(iovb)->iovcnt);
29874 vc->rx_iov = NULL;
29875@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29876 printk(" - PDU size mismatch.\n");
29877 else
29878 printk(".\n");
29879- atomic_inc(&vcc->stats->rx_err);
29880+ atomic_inc_unchecked(&vcc->stats->rx_err);
29881 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29882 NS_SKB(iovb)->iovcnt);
29883 vc->rx_iov = NULL;
29884@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29885 if (!atm_charge(vcc, skb->truesize))
29886 {
29887 push_rxbufs(card, skb);
29888- atomic_inc(&vcc->stats->rx_drop);
29889+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29890 }
29891 else
29892 {
29893@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29894 ATM_SKB(skb)->vcc = vcc;
29895 __net_timestamp(skb);
29896 vcc->push(vcc, skb);
29897- atomic_inc(&vcc->stats->rx);
29898+ atomic_inc_unchecked(&vcc->stats->rx);
29899 }
29900 }
29901 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
29902@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29903 if (!atm_charge(vcc, sb->truesize))
29904 {
29905 push_rxbufs(card, sb);
29906- atomic_inc(&vcc->stats->rx_drop);
29907+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29908 }
29909 else
29910 {
29911@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29912 ATM_SKB(sb)->vcc = vcc;
29913 __net_timestamp(sb);
29914 vcc->push(vcc, sb);
29915- atomic_inc(&vcc->stats->rx);
29916+ atomic_inc_unchecked(&vcc->stats->rx);
29917 }
29918
29919 push_rxbufs(card, skb);
29920@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29921 if (!atm_charge(vcc, skb->truesize))
29922 {
29923 push_rxbufs(card, skb);
29924- atomic_inc(&vcc->stats->rx_drop);
29925+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29926 }
29927 else
29928 {
29929@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29930 ATM_SKB(skb)->vcc = vcc;
29931 __net_timestamp(skb);
29932 vcc->push(vcc, skb);
29933- atomic_inc(&vcc->stats->rx);
29934+ atomic_inc_unchecked(&vcc->stats->rx);
29935 }
29936
29937 push_rxbufs(card, sb);
29938@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29939 if (hb == NULL)
29940 {
29941 printk("nicstar%d: Out of huge buffers.\n", card->index);
29942- atomic_inc(&vcc->stats->rx_drop);
29943+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29944 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29945 NS_SKB(iovb)->iovcnt);
29946 vc->rx_iov = NULL;
29947@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29948 }
29949 else
29950 dev_kfree_skb_any(hb);
29951- atomic_inc(&vcc->stats->rx_drop);
29952+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29953 }
29954 else
29955 {
29956@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29957 #endif /* NS_USE_DESTRUCTORS */
29958 __net_timestamp(hb);
29959 vcc->push(vcc, hb);
29960- atomic_inc(&vcc->stats->rx);
29961+ atomic_inc_unchecked(&vcc->stats->rx);
29962 }
29963 }
29964
29965diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
29966index 84c93ff..e6ed269 100644
29967--- a/drivers/atm/solos-pci.c
29968+++ b/drivers/atm/solos-pci.c
29969@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
29970 }
29971 atm_charge(vcc, skb->truesize);
29972 vcc->push(vcc, skb);
29973- atomic_inc(&vcc->stats->rx);
29974+ atomic_inc_unchecked(&vcc->stats->rx);
29975 break;
29976
29977 case PKT_STATUS:
29978@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
29979 char msg[500];
29980 char item[10];
29981
29982+ pax_track_stack();
29983+
29984 len = buf->len;
29985 for (i = 0; i < len; i++){
29986 if(i % 8 == 0)
29987@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
29988 vcc = SKB_CB(oldskb)->vcc;
29989
29990 if (vcc) {
29991- atomic_inc(&vcc->stats->tx);
29992+ atomic_inc_unchecked(&vcc->stats->tx);
29993 solos_pop(vcc, oldskb);
29994 } else
29995 dev_kfree_skb_irq(oldskb);
29996diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
29997index 6dd3f59..ee377f3 100644
29998--- a/drivers/atm/suni.c
29999+++ b/drivers/atm/suni.c
30000@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30001
30002
30003 #define ADD_LIMITED(s,v) \
30004- atomic_add((v),&stats->s); \
30005- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30006+ atomic_add_unchecked((v),&stats->s); \
30007+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30008
30009
30010 static void suni_hz(unsigned long from_timer)
30011diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30012index fc8cb07..4a80e53 100644
30013--- a/drivers/atm/uPD98402.c
30014+++ b/drivers/atm/uPD98402.c
30015@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30016 struct sonet_stats tmp;
30017 int error = 0;
30018
30019- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30020+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30021 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30022 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30023 if (zero && !error) {
30024@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30025
30026
30027 #define ADD_LIMITED(s,v) \
30028- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30029- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30030- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30031+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30032+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30033+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30034
30035
30036 static void stat_event(struct atm_dev *dev)
30037@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30038 if (reason & uPD98402_INT_PFM) stat_event(dev);
30039 if (reason & uPD98402_INT_PCO) {
30040 (void) GET(PCOCR); /* clear interrupt cause */
30041- atomic_add(GET(HECCT),
30042+ atomic_add_unchecked(GET(HECCT),
30043 &PRIV(dev)->sonet_stats.uncorr_hcs);
30044 }
30045 if ((reason & uPD98402_INT_RFO) &&
30046@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30047 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30048 uPD98402_INT_LOS),PIMR); /* enable them */
30049 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30050- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30051- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30052- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30053+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30054+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30055+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30056 return 0;
30057 }
30058
30059diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30060index 2e9635b..32927b4 100644
30061--- a/drivers/atm/zatm.c
30062+++ b/drivers/atm/zatm.c
30063@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30064 }
30065 if (!size) {
30066 dev_kfree_skb_irq(skb);
30067- if (vcc) atomic_inc(&vcc->stats->rx_err);
30068+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30069 continue;
30070 }
30071 if (!atm_charge(vcc,skb->truesize)) {
30072@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30073 skb->len = size;
30074 ATM_SKB(skb)->vcc = vcc;
30075 vcc->push(vcc,skb);
30076- atomic_inc(&vcc->stats->rx);
30077+ atomic_inc_unchecked(&vcc->stats->rx);
30078 }
30079 zout(pos & 0xffff,MTA(mbx));
30080 #if 0 /* probably a stupid idea */
30081@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30082 skb_queue_head(&zatm_vcc->backlog,skb);
30083 break;
30084 }
30085- atomic_inc(&vcc->stats->tx);
30086+ atomic_inc_unchecked(&vcc->stats->tx);
30087 wake_up(&zatm_vcc->tx_wait);
30088 }
30089
30090diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30091index 63c143e..fece183 100644
30092--- a/drivers/base/bus.c
30093+++ b/drivers/base/bus.c
30094@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30095 return ret;
30096 }
30097
30098-static struct sysfs_ops driver_sysfs_ops = {
30099+static const struct sysfs_ops driver_sysfs_ops = {
30100 .show = drv_attr_show,
30101 .store = drv_attr_store,
30102 };
30103@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30104 return ret;
30105 }
30106
30107-static struct sysfs_ops bus_sysfs_ops = {
30108+static const struct sysfs_ops bus_sysfs_ops = {
30109 .show = bus_attr_show,
30110 .store = bus_attr_store,
30111 };
30112@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30113 return 0;
30114 }
30115
30116-static struct kset_uevent_ops bus_uevent_ops = {
30117+static const struct kset_uevent_ops bus_uevent_ops = {
30118 .filter = bus_uevent_filter,
30119 };
30120
30121diff --git a/drivers/base/class.c b/drivers/base/class.c
30122index 6e2c3b0..cb61871 100644
30123--- a/drivers/base/class.c
30124+++ b/drivers/base/class.c
30125@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30126 kfree(cp);
30127 }
30128
30129-static struct sysfs_ops class_sysfs_ops = {
30130+static const struct sysfs_ops class_sysfs_ops = {
30131 .show = class_attr_show,
30132 .store = class_attr_store,
30133 };
30134diff --git a/drivers/base/core.c b/drivers/base/core.c
30135index f33d768..a9358d0 100644
30136--- a/drivers/base/core.c
30137+++ b/drivers/base/core.c
30138@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30139 return ret;
30140 }
30141
30142-static struct sysfs_ops dev_sysfs_ops = {
30143+static const struct sysfs_ops dev_sysfs_ops = {
30144 .show = dev_attr_show,
30145 .store = dev_attr_store,
30146 };
30147@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30148 return retval;
30149 }
30150
30151-static struct kset_uevent_ops device_uevent_ops = {
30152+static const struct kset_uevent_ops device_uevent_ops = {
30153 .filter = dev_uevent_filter,
30154 .name = dev_uevent_name,
30155 .uevent = dev_uevent,
30156diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30157index 989429c..2272b00 100644
30158--- a/drivers/base/memory.c
30159+++ b/drivers/base/memory.c
30160@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30161 return retval;
30162 }
30163
30164-static struct kset_uevent_ops memory_uevent_ops = {
30165+static const struct kset_uevent_ops memory_uevent_ops = {
30166 .name = memory_uevent_name,
30167 .uevent = memory_uevent,
30168 };
30169diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30170index 3f202f7..61c4a6f 100644
30171--- a/drivers/base/sys.c
30172+++ b/drivers/base/sys.c
30173@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30174 return -EIO;
30175 }
30176
30177-static struct sysfs_ops sysfs_ops = {
30178+static const struct sysfs_ops sysfs_ops = {
30179 .show = sysdev_show,
30180 .store = sysdev_store,
30181 };
30182@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30183 return -EIO;
30184 }
30185
30186-static struct sysfs_ops sysfs_class_ops = {
30187+static const struct sysfs_ops sysfs_class_ops = {
30188 .show = sysdev_class_show,
30189 .store = sysdev_class_store,
30190 };
30191diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30192index eb4fa19..1954777 100644
30193--- a/drivers/block/DAC960.c
30194+++ b/drivers/block/DAC960.c
30195@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30196 unsigned long flags;
30197 int Channel, TargetID;
30198
30199+ pax_track_stack();
30200+
30201 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30202 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30203 sizeof(DAC960_SCSI_Inquiry_T) +
30204diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30205index ca9c548..ca6899c 100644
30206--- a/drivers/block/cciss.c
30207+++ b/drivers/block/cciss.c
30208@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30209 int err;
30210 u32 cp;
30211
30212+ memset(&arg64, 0, sizeof(arg64));
30213+
30214 err = 0;
30215 err |=
30216 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30217@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30218 /* Wait (up to 20 seconds) for a command to complete */
30219
30220 for (i = 20 * HZ; i > 0; i--) {
30221- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30222+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30223 if (done == FIFO_EMPTY)
30224 schedule_timeout_uninterruptible(1);
30225 else
30226@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30227 resend_cmd1:
30228
30229 /* Disable interrupt on the board. */
30230- h->access.set_intr_mask(h, CCISS_INTR_OFF);
30231+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
30232
30233 /* Make sure there is room in the command FIFO */
30234 /* Actually it should be completely empty at this time */
30235@@ -2884,13 +2886,13 @@ resend_cmd1:
30236 /* tape side of the driver. */
30237 for (i = 200000; i > 0; i--) {
30238 /* if fifo isn't full go */
30239- if (!(h->access.fifo_full(h)))
30240+ if (!(h->access->fifo_full(h)))
30241 break;
30242 udelay(10);
30243 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30244 " waiting!\n", h->ctlr);
30245 }
30246- h->access.submit_command(h, c); /* Send the cmd */
30247+ h->access->submit_command(h, c); /* Send the cmd */
30248 do {
30249 complete = pollcomplete(h->ctlr);
30250
30251@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30252 while (!hlist_empty(&h->reqQ)) {
30253 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30254 /* can't do anything if fifo is full */
30255- if ((h->access.fifo_full(h))) {
30256+ if ((h->access->fifo_full(h))) {
30257 printk(KERN_WARNING "cciss: fifo full\n");
30258 break;
30259 }
30260@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30261 h->Qdepth--;
30262
30263 /* Tell the controller execute command */
30264- h->access.submit_command(h, c);
30265+ h->access->submit_command(h, c);
30266
30267 /* Put job onto the completed Q */
30268 addQ(&h->cmpQ, c);
30269@@ -3393,17 +3395,17 @@ startio:
30270
30271 static inline unsigned long get_next_completion(ctlr_info_t *h)
30272 {
30273- return h->access.command_completed(h);
30274+ return h->access->command_completed(h);
30275 }
30276
30277 static inline int interrupt_pending(ctlr_info_t *h)
30278 {
30279- return h->access.intr_pending(h);
30280+ return h->access->intr_pending(h);
30281 }
30282
30283 static inline long interrupt_not_for_us(ctlr_info_t *h)
30284 {
30285- return (((h->access.intr_pending(h) == 0) ||
30286+ return (((h->access->intr_pending(h) == 0) ||
30287 (h->interrupts_enabled == 0)));
30288 }
30289
30290@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30291 */
30292 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30293 c->product_name = products[prod_index].product_name;
30294- c->access = *(products[prod_index].access);
30295+ c->access = products[prod_index].access;
30296 c->nr_cmds = c->max_commands - 4;
30297 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30298 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30299@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30300 }
30301
30302 /* make sure the board interrupts are off */
30303- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30304+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30305 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30306 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30307 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30308@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30309 cciss_scsi_setup(i);
30310
30311 /* Turn the interrupts on so we can service requests */
30312- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30313+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30314
30315 /* Get the firmware version */
30316 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30317diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30318index 04d6bf8..36e712d 100644
30319--- a/drivers/block/cciss.h
30320+++ b/drivers/block/cciss.h
30321@@ -90,7 +90,7 @@ struct ctlr_info
30322 // information about each logical volume
30323 drive_info_struct *drv[CISS_MAX_LUN];
30324
30325- struct access_method access;
30326+ struct access_method *access;
30327
30328 /* queue and queue Info */
30329 struct hlist_head reqQ;
30330diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30331index 6422651..bb1bdef 100644
30332--- a/drivers/block/cpqarray.c
30333+++ b/drivers/block/cpqarray.c
30334@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30335 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30336 goto Enomem4;
30337 }
30338- hba[i]->access.set_intr_mask(hba[i], 0);
30339+ hba[i]->access->set_intr_mask(hba[i], 0);
30340 if (request_irq(hba[i]->intr, do_ida_intr,
30341 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30342 {
30343@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30344 add_timer(&hba[i]->timer);
30345
30346 /* Enable IRQ now that spinlock and rate limit timer are set up */
30347- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30348+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30349
30350 for(j=0; j<NWD; j++) {
30351 struct gendisk *disk = ida_gendisk[i][j];
30352@@ -695,7 +695,7 @@ DBGINFO(
30353 for(i=0; i<NR_PRODUCTS; i++) {
30354 if (board_id == products[i].board_id) {
30355 c->product_name = products[i].product_name;
30356- c->access = *(products[i].access);
30357+ c->access = products[i].access;
30358 break;
30359 }
30360 }
30361@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30362 hba[ctlr]->intr = intr;
30363 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30364 hba[ctlr]->product_name = products[j].product_name;
30365- hba[ctlr]->access = *(products[j].access);
30366+ hba[ctlr]->access = products[j].access;
30367 hba[ctlr]->ctlr = ctlr;
30368 hba[ctlr]->board_id = board_id;
30369 hba[ctlr]->pci_dev = NULL; /* not PCI */
30370@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30371 struct scatterlist tmp_sg[SG_MAX];
30372 int i, dir, seg;
30373
30374+ pax_track_stack();
30375+
30376 if (blk_queue_plugged(q))
30377 goto startio;
30378
30379@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30380
30381 while((c = h->reqQ) != NULL) {
30382 /* Can't do anything if we're busy */
30383- if (h->access.fifo_full(h) == 0)
30384+ if (h->access->fifo_full(h) == 0)
30385 return;
30386
30387 /* Get the first entry from the request Q */
30388@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30389 h->Qdepth--;
30390
30391 /* Tell the controller to do our bidding */
30392- h->access.submit_command(h, c);
30393+ h->access->submit_command(h, c);
30394
30395 /* Get onto the completion Q */
30396 addQ(&h->cmpQ, c);
30397@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30398 unsigned long flags;
30399 __u32 a,a1;
30400
30401- istat = h->access.intr_pending(h);
30402+ istat = h->access->intr_pending(h);
30403 /* Is this interrupt for us? */
30404 if (istat == 0)
30405 return IRQ_NONE;
30406@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30407 */
30408 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30409 if (istat & FIFO_NOT_EMPTY) {
30410- while((a = h->access.command_completed(h))) {
30411+ while((a = h->access->command_completed(h))) {
30412 a1 = a; a &= ~3;
30413 if ((c = h->cmpQ) == NULL)
30414 {
30415@@ -1434,11 +1436,11 @@ static int sendcmd(
30416 /*
30417 * Disable interrupt
30418 */
30419- info_p->access.set_intr_mask(info_p, 0);
30420+ info_p->access->set_intr_mask(info_p, 0);
30421 /* Make sure there is room in the command FIFO */
30422 /* Actually it should be completely empty at this time. */
30423 for (i = 200000; i > 0; i--) {
30424- temp = info_p->access.fifo_full(info_p);
30425+ temp = info_p->access->fifo_full(info_p);
30426 if (temp != 0) {
30427 break;
30428 }
30429@@ -1451,7 +1453,7 @@ DBG(
30430 /*
30431 * Send the cmd
30432 */
30433- info_p->access.submit_command(info_p, c);
30434+ info_p->access->submit_command(info_p, c);
30435 complete = pollcomplete(ctlr);
30436
30437 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30438@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30439 * we check the new geometry. Then turn interrupts back on when
30440 * we're done.
30441 */
30442- host->access.set_intr_mask(host, 0);
30443+ host->access->set_intr_mask(host, 0);
30444 getgeometry(ctlr);
30445- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30446+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30447
30448 for(i=0; i<NWD; i++) {
30449 struct gendisk *disk = ida_gendisk[ctlr][i];
30450@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30451 /* Wait (up to 2 seconds) for a command to complete */
30452
30453 for (i = 200000; i > 0; i--) {
30454- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30455+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30456 if (done == 0) {
30457 udelay(10); /* a short fixed delay */
30458 } else
30459diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30460index be73e9d..7fbf140 100644
30461--- a/drivers/block/cpqarray.h
30462+++ b/drivers/block/cpqarray.h
30463@@ -99,7 +99,7 @@ struct ctlr_info {
30464 drv_info_t drv[NWD];
30465 struct proc_dir_entry *proc;
30466
30467- struct access_method access;
30468+ struct access_method *access;
30469
30470 cmdlist_t *reqQ;
30471 cmdlist_t *cmpQ;
30472diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30473index 8ec2d70..2804b30 100644
30474--- a/drivers/block/loop.c
30475+++ b/drivers/block/loop.c
30476@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30477 mm_segment_t old_fs = get_fs();
30478
30479 set_fs(get_ds());
30480- bw = file->f_op->write(file, buf, len, &pos);
30481+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30482 set_fs(old_fs);
30483 if (likely(bw == len))
30484 return 0;
30485diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30486index 26ada47..083c480 100644
30487--- a/drivers/block/nbd.c
30488+++ b/drivers/block/nbd.c
30489@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30490 struct kvec iov;
30491 sigset_t blocked, oldset;
30492
30493+ pax_track_stack();
30494+
30495 if (unlikely(!sock)) {
30496 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30497 lo->disk->disk_name, (send ? "send" : "recv"));
30498@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30499 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30500 unsigned int cmd, unsigned long arg)
30501 {
30502+ pax_track_stack();
30503+
30504 switch (cmd) {
30505 case NBD_DISCONNECT: {
30506 struct request sreq;
30507diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30508index a5d585d..d087be3 100644
30509--- a/drivers/block/pktcdvd.c
30510+++ b/drivers/block/pktcdvd.c
30511@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30512 return len;
30513 }
30514
30515-static struct sysfs_ops kobj_pkt_ops = {
30516+static const struct sysfs_ops kobj_pkt_ops = {
30517 .show = kobj_pkt_show,
30518 .store = kobj_pkt_store
30519 };
30520diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30521index 6aad99e..89cd142 100644
30522--- a/drivers/char/Kconfig
30523+++ b/drivers/char/Kconfig
30524@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30525
30526 config DEVKMEM
30527 bool "/dev/kmem virtual device support"
30528- default y
30529+ default n
30530+ depends on !GRKERNSEC_KMEM
30531 help
30532 Say Y here if you want to support the /dev/kmem device. The
30533 /dev/kmem device is rarely used, but can be used for certain
30534@@ -1114,6 +1115,7 @@ config DEVPORT
30535 bool
30536 depends on !M68K
30537 depends on ISA || PCI
30538+ depends on !GRKERNSEC_KMEM
30539 default y
30540
30541 source "drivers/s390/char/Kconfig"
30542diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30543index a96f319..a778a5b 100644
30544--- a/drivers/char/agp/frontend.c
30545+++ b/drivers/char/agp/frontend.c
30546@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30547 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30548 return -EFAULT;
30549
30550- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30551+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30552 return -EFAULT;
30553
30554 client = agp_find_client_by_pid(reserve.pid);
30555diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30556index d8cff90..9628e70 100644
30557--- a/drivers/char/briq_panel.c
30558+++ b/drivers/char/briq_panel.c
30559@@ -10,6 +10,7 @@
30560 #include <linux/types.h>
30561 #include <linux/errno.h>
30562 #include <linux/tty.h>
30563+#include <linux/mutex.h>
30564 #include <linux/timer.h>
30565 #include <linux/kernel.h>
30566 #include <linux/wait.h>
30567@@ -36,6 +37,7 @@ static int vfd_is_open;
30568 static unsigned char vfd[40];
30569 static int vfd_cursor;
30570 static unsigned char ledpb, led;
30571+static DEFINE_MUTEX(vfd_mutex);
30572
30573 static void update_vfd(void)
30574 {
30575@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30576 if (!vfd_is_open)
30577 return -EBUSY;
30578
30579+ mutex_lock(&vfd_mutex);
30580 for (;;) {
30581 char c;
30582 if (!indx)
30583 break;
30584- if (get_user(c, buf))
30585+ if (get_user(c, buf)) {
30586+ mutex_unlock(&vfd_mutex);
30587 return -EFAULT;
30588+ }
30589 if (esc) {
30590 set_led(c);
30591 esc = 0;
30592@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30593 buf++;
30594 }
30595 update_vfd();
30596+ mutex_unlock(&vfd_mutex);
30597
30598 return len;
30599 }
30600diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30601index 31e7c91..161afc0 100644
30602--- a/drivers/char/genrtc.c
30603+++ b/drivers/char/genrtc.c
30604@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30605 switch (cmd) {
30606
30607 case RTC_PLL_GET:
30608+ memset(&pll, 0, sizeof(pll));
30609 if (get_rtc_pll(&pll))
30610 return -EINVAL;
30611 else
30612diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30613index 006466d..a2bb21c 100644
30614--- a/drivers/char/hpet.c
30615+++ b/drivers/char/hpet.c
30616@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30617 return 0;
30618 }
30619
30620-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30621+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30622
30623 static int
30624 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30625@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30626 }
30627
30628 static int
30629-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30630+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30631 {
30632 struct hpet_timer __iomem *timer;
30633 struct hpet __iomem *hpet;
30634@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30635 {
30636 struct hpet_info info;
30637
30638+ memset(&info, 0, sizeof(info));
30639+
30640 if (devp->hd_ireqfreq)
30641 info.hi_ireqfreq =
30642 hpet_time_div(hpetp, devp->hd_ireqfreq);
30643- else
30644- info.hi_ireqfreq = 0;
30645 info.hi_flags =
30646 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30647 info.hi_hpet = hpetp->hp_which;
30648diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30649index 0afc8b8..6913fc3 100644
30650--- a/drivers/char/hvc_beat.c
30651+++ b/drivers/char/hvc_beat.c
30652@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
30653 return cnt;
30654 }
30655
30656-static struct hv_ops hvc_beat_get_put_ops = {
30657+static const struct hv_ops hvc_beat_get_put_ops = {
30658 .get_chars = hvc_beat_get_chars,
30659 .put_chars = hvc_beat_put_chars,
30660 };
30661diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
30662index 98097f2..407dddc 100644
30663--- a/drivers/char/hvc_console.c
30664+++ b/drivers/char/hvc_console.c
30665@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
30666 * console interfaces but can still be used as a tty device. This has to be
30667 * static because kmalloc will not work during early console init.
30668 */
30669-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30670+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30671 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
30672 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
30673
30674@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
30675 * vty adapters do NOT get an hvc_instantiate() callback since they
30676 * appear after early console init.
30677 */
30678-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
30679+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
30680 {
30681 struct hvc_struct *hp;
30682
30683@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
30684 };
30685
30686 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
30687- struct hv_ops *ops, int outbuf_size)
30688+ const struct hv_ops *ops, int outbuf_size)
30689 {
30690 struct hvc_struct *hp;
30691 int i;
30692diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
30693index 10950ca..ed176c3 100644
30694--- a/drivers/char/hvc_console.h
30695+++ b/drivers/char/hvc_console.h
30696@@ -55,7 +55,7 @@ struct hvc_struct {
30697 int outbuf_size;
30698 int n_outbuf;
30699 uint32_t vtermno;
30700- struct hv_ops *ops;
30701+ const struct hv_ops *ops;
30702 int irq_requested;
30703 int data;
30704 struct winsize ws;
30705@@ -76,11 +76,11 @@ struct hv_ops {
30706 };
30707
30708 /* Register a vterm and a slot index for use as a console (console_init) */
30709-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
30710+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
30711
30712 /* register a vterm for hvc tty operation (module_init or hotplug add) */
30713 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
30714- struct hv_ops *ops, int outbuf_size);
30715+ const struct hv_ops *ops, int outbuf_size);
30716 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
30717 extern int hvc_remove(struct hvc_struct *hp);
30718
30719diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
30720index 936d05b..fd02426 100644
30721--- a/drivers/char/hvc_iseries.c
30722+++ b/drivers/char/hvc_iseries.c
30723@@ -197,7 +197,7 @@ done:
30724 return sent;
30725 }
30726
30727-static struct hv_ops hvc_get_put_ops = {
30728+static const struct hv_ops hvc_get_put_ops = {
30729 .get_chars = get_chars,
30730 .put_chars = put_chars,
30731 .notifier_add = notifier_add_irq,
30732diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
30733index b0e168f..69cda2a 100644
30734--- a/drivers/char/hvc_iucv.c
30735+++ b/drivers/char/hvc_iucv.c
30736@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
30737
30738
30739 /* HVC operations */
30740-static struct hv_ops hvc_iucv_ops = {
30741+static const struct hv_ops hvc_iucv_ops = {
30742 .get_chars = hvc_iucv_get_chars,
30743 .put_chars = hvc_iucv_put_chars,
30744 .notifier_add = hvc_iucv_notifier_add,
30745diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
30746index 88590d0..61c4a61 100644
30747--- a/drivers/char/hvc_rtas.c
30748+++ b/drivers/char/hvc_rtas.c
30749@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
30750 return i;
30751 }
30752
30753-static struct hv_ops hvc_rtas_get_put_ops = {
30754+static const struct hv_ops hvc_rtas_get_put_ops = {
30755 .get_chars = hvc_rtas_read_console,
30756 .put_chars = hvc_rtas_write_console,
30757 };
30758diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
30759index bd63ba8..b0957e6 100644
30760--- a/drivers/char/hvc_udbg.c
30761+++ b/drivers/char/hvc_udbg.c
30762@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
30763 return i;
30764 }
30765
30766-static struct hv_ops hvc_udbg_ops = {
30767+static const struct hv_ops hvc_udbg_ops = {
30768 .get_chars = hvc_udbg_get,
30769 .put_chars = hvc_udbg_put,
30770 };
30771diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
30772index 10be343..27370e9 100644
30773--- a/drivers/char/hvc_vio.c
30774+++ b/drivers/char/hvc_vio.c
30775@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
30776 return got;
30777 }
30778
30779-static struct hv_ops hvc_get_put_ops = {
30780+static const struct hv_ops hvc_get_put_ops = {
30781 .get_chars = filtered_get_chars,
30782 .put_chars = hvc_put_chars,
30783 .notifier_add = notifier_add_irq,
30784diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
30785index a6ee32b..94f8c26 100644
30786--- a/drivers/char/hvc_xen.c
30787+++ b/drivers/char/hvc_xen.c
30788@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
30789 return recv;
30790 }
30791
30792-static struct hv_ops hvc_ops = {
30793+static const struct hv_ops hvc_ops = {
30794 .get_chars = read_console,
30795 .put_chars = write_console,
30796 .notifier_add = notifier_add_irq,
30797diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
30798index 266b858..f3ee0bb 100644
30799--- a/drivers/char/hvcs.c
30800+++ b/drivers/char/hvcs.c
30801@@ -82,6 +82,7 @@
30802 #include <asm/hvcserver.h>
30803 #include <asm/uaccess.h>
30804 #include <asm/vio.h>
30805+#include <asm/local.h>
30806
30807 /*
30808 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
30809@@ -269,7 +270,7 @@ struct hvcs_struct {
30810 unsigned int index;
30811
30812 struct tty_struct *tty;
30813- int open_count;
30814+ local_t open_count;
30815
30816 /*
30817 * Used to tell the driver kernel_thread what operations need to take
30818@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
30819
30820 spin_lock_irqsave(&hvcsd->lock, flags);
30821
30822- if (hvcsd->open_count > 0) {
30823+ if (local_read(&hvcsd->open_count) > 0) {
30824 spin_unlock_irqrestore(&hvcsd->lock, flags);
30825 printk(KERN_INFO "HVCS: vterm state unchanged. "
30826 "The hvcs device node is still in use.\n");
30827@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
30828 if ((retval = hvcs_partner_connect(hvcsd)))
30829 goto error_release;
30830
30831- hvcsd->open_count = 1;
30832+ local_set(&hvcsd->open_count, 1);
30833 hvcsd->tty = tty;
30834 tty->driver_data = hvcsd;
30835
30836@@ -1169,7 +1170,7 @@ fast_open:
30837
30838 spin_lock_irqsave(&hvcsd->lock, flags);
30839 kref_get(&hvcsd->kref);
30840- hvcsd->open_count++;
30841+ local_inc(&hvcsd->open_count);
30842 hvcsd->todo_mask |= HVCS_SCHED_READ;
30843 spin_unlock_irqrestore(&hvcsd->lock, flags);
30844
30845@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
30846 hvcsd = tty->driver_data;
30847
30848 spin_lock_irqsave(&hvcsd->lock, flags);
30849- if (--hvcsd->open_count == 0) {
30850+ if (local_dec_and_test(&hvcsd->open_count)) {
30851
30852 vio_disable_interrupts(hvcsd->vdev);
30853
30854@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
30855 free_irq(irq, hvcsd);
30856 kref_put(&hvcsd->kref, destroy_hvcs_struct);
30857 return;
30858- } else if (hvcsd->open_count < 0) {
30859+ } else if (local_read(&hvcsd->open_count) < 0) {
30860 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
30861 " is missmanaged.\n",
30862- hvcsd->vdev->unit_address, hvcsd->open_count);
30863+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
30864 }
30865
30866 spin_unlock_irqrestore(&hvcsd->lock, flags);
30867@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
30868
30869 spin_lock_irqsave(&hvcsd->lock, flags);
30870 /* Preserve this so that we know how many kref refs to put */
30871- temp_open_count = hvcsd->open_count;
30872+ temp_open_count = local_read(&hvcsd->open_count);
30873
30874 /*
30875 * Don't kref put inside the spinlock because the destruction
30876@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
30877 hvcsd->tty->driver_data = NULL;
30878 hvcsd->tty = NULL;
30879
30880- hvcsd->open_count = 0;
30881+ local_set(&hvcsd->open_count, 0);
30882
30883 /* This will drop any buffered data on the floor which is OK in a hangup
30884 * scenario. */
30885@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
30886 * the middle of a write operation? This is a crummy place to do this
30887 * but we want to keep it all in the spinlock.
30888 */
30889- if (hvcsd->open_count <= 0) {
30890+ if (local_read(&hvcsd->open_count) <= 0) {
30891 spin_unlock_irqrestore(&hvcsd->lock, flags);
30892 return -ENODEV;
30893 }
30894@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
30895 {
30896 struct hvcs_struct *hvcsd = tty->driver_data;
30897
30898- if (!hvcsd || hvcsd->open_count <= 0)
30899+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
30900 return 0;
30901
30902 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
30903diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
30904index ec5e3f8..02455ba 100644
30905--- a/drivers/char/ipmi/ipmi_msghandler.c
30906+++ b/drivers/char/ipmi/ipmi_msghandler.c
30907@@ -414,7 +414,7 @@ struct ipmi_smi {
30908 struct proc_dir_entry *proc_dir;
30909 char proc_dir_name[10];
30910
30911- atomic_t stats[IPMI_NUM_STATS];
30912+ atomic_unchecked_t stats[IPMI_NUM_STATS];
30913
30914 /*
30915 * run_to_completion duplicate of smb_info, smi_info
30916@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
30917
30918
30919 #define ipmi_inc_stat(intf, stat) \
30920- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
30921+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
30922 #define ipmi_get_stat(intf, stat) \
30923- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
30924+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
30925
30926 static int is_lan_addr(struct ipmi_addr *addr)
30927 {
30928@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
30929 INIT_LIST_HEAD(&intf->cmd_rcvrs);
30930 init_waitqueue_head(&intf->waitq);
30931 for (i = 0; i < IPMI_NUM_STATS; i++)
30932- atomic_set(&intf->stats[i], 0);
30933+ atomic_set_unchecked(&intf->stats[i], 0);
30934
30935 intf->proc_dir = NULL;
30936
30937@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
30938 struct ipmi_smi_msg smi_msg;
30939 struct ipmi_recv_msg recv_msg;
30940
30941+ pax_track_stack();
30942+
30943 si = (struct ipmi_system_interface_addr *) &addr;
30944 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
30945 si->channel = IPMI_BMC_CHANNEL;
30946diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
30947index abae8c9..8021979 100644
30948--- a/drivers/char/ipmi/ipmi_si_intf.c
30949+++ b/drivers/char/ipmi/ipmi_si_intf.c
30950@@ -277,7 +277,7 @@ struct smi_info {
30951 unsigned char slave_addr;
30952
30953 /* Counters and things for the proc filesystem. */
30954- atomic_t stats[SI_NUM_STATS];
30955+ atomic_unchecked_t stats[SI_NUM_STATS];
30956
30957 struct task_struct *thread;
30958
30959@@ -285,9 +285,9 @@ struct smi_info {
30960 };
30961
30962 #define smi_inc_stat(smi, stat) \
30963- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
30964+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
30965 #define smi_get_stat(smi, stat) \
30966- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
30967+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
30968
30969 #define SI_MAX_PARMS 4
30970
30971@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
30972 atomic_set(&new_smi->req_events, 0);
30973 new_smi->run_to_completion = 0;
30974 for (i = 0; i < SI_NUM_STATS; i++)
30975- atomic_set(&new_smi->stats[i], 0);
30976+ atomic_set_unchecked(&new_smi->stats[i], 0);
30977
30978 new_smi->interrupt_disabled = 0;
30979 atomic_set(&new_smi->stop_operation, 0);
30980diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
30981index 402838f..55e2200 100644
30982--- a/drivers/char/istallion.c
30983+++ b/drivers/char/istallion.c
30984@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
30985 * re-used for each stats call.
30986 */
30987 static comstats_t stli_comstats;
30988-static combrd_t stli_brdstats;
30989 static struct asystats stli_cdkstats;
30990
30991 /*****************************************************************************/
30992@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
30993 {
30994 struct stlibrd *brdp;
30995 unsigned int i;
30996+ combrd_t stli_brdstats;
30997
30998 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
30999 return -EFAULT;
31000@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31001 struct stliport stli_dummyport;
31002 struct stliport *portp;
31003
31004+ pax_track_stack();
31005+
31006 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31007 return -EFAULT;
31008 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31009@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31010 struct stlibrd stli_dummybrd;
31011 struct stlibrd *brdp;
31012
31013+ pax_track_stack();
31014+
31015 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31016 return -EFAULT;
31017 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31018diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31019index 950837c..e55a288 100644
31020--- a/drivers/char/keyboard.c
31021+++ b/drivers/char/keyboard.c
31022@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31023 kbd->kbdmode == VC_MEDIUMRAW) &&
31024 value != KVAL(K_SAK))
31025 return; /* SAK is allowed even in raw mode */
31026+
31027+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31028+ {
31029+ void *func = fn_handler[value];
31030+ if (func == fn_show_state || func == fn_show_ptregs ||
31031+ func == fn_show_mem)
31032+ return;
31033+ }
31034+#endif
31035+
31036 fn_handler[value](vc);
31037 }
31038
31039@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31040 .evbit = { BIT_MASK(EV_SND) },
31041 },
31042
31043- { }, /* Terminating entry */
31044+ { 0 }, /* Terminating entry */
31045 };
31046
31047 MODULE_DEVICE_TABLE(input, kbd_ids);
31048diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31049index 87c67b4..230527a 100644
31050--- a/drivers/char/mbcs.c
31051+++ b/drivers/char/mbcs.c
31052@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31053 return 0;
31054 }
31055
31056-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31057+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31058 {
31059 .part_num = MBCS_PART_NUM,
31060 .mfg_num = MBCS_MFG_NUM,
31061diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31062index 1270f64..8495f49 100644
31063--- a/drivers/char/mem.c
31064+++ b/drivers/char/mem.c
31065@@ -18,6 +18,7 @@
31066 #include <linux/raw.h>
31067 #include <linux/tty.h>
31068 #include <linux/capability.h>
31069+#include <linux/security.h>
31070 #include <linux/ptrace.h>
31071 #include <linux/device.h>
31072 #include <linux/highmem.h>
31073@@ -35,6 +36,10 @@
31074 # include <linux/efi.h>
31075 #endif
31076
31077+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31078+extern struct file_operations grsec_fops;
31079+#endif
31080+
31081 static inline unsigned long size_inside_page(unsigned long start,
31082 unsigned long size)
31083 {
31084@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31085
31086 while (cursor < to) {
31087 if (!devmem_is_allowed(pfn)) {
31088+#ifdef CONFIG_GRKERNSEC_KMEM
31089+ gr_handle_mem_readwrite(from, to);
31090+#else
31091 printk(KERN_INFO
31092 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31093 current->comm, from, to);
31094+#endif
31095 return 0;
31096 }
31097 cursor += PAGE_SIZE;
31098@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31099 }
31100 return 1;
31101 }
31102+#elif defined(CONFIG_GRKERNSEC_KMEM)
31103+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31104+{
31105+ return 0;
31106+}
31107 #else
31108 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31109 {
31110@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31111 #endif
31112
31113 while (count > 0) {
31114+ char *temp;
31115+
31116 /*
31117 * Handle first page in case it's not aligned
31118 */
31119@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31120 if (!ptr)
31121 return -EFAULT;
31122
31123- if (copy_to_user(buf, ptr, sz)) {
31124+#ifdef CONFIG_PAX_USERCOPY
31125+ temp = kmalloc(sz, GFP_KERNEL);
31126+ if (!temp) {
31127+ unxlate_dev_mem_ptr(p, ptr);
31128+ return -ENOMEM;
31129+ }
31130+ memcpy(temp, ptr, sz);
31131+#else
31132+ temp = ptr;
31133+#endif
31134+
31135+ if (copy_to_user(buf, temp, sz)) {
31136+
31137+#ifdef CONFIG_PAX_USERCOPY
31138+ kfree(temp);
31139+#endif
31140+
31141 unxlate_dev_mem_ptr(p, ptr);
31142 return -EFAULT;
31143 }
31144
31145+#ifdef CONFIG_PAX_USERCOPY
31146+ kfree(temp);
31147+#endif
31148+
31149 unxlate_dev_mem_ptr(p, ptr);
31150
31151 buf += sz;
31152@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31153 size_t count, loff_t *ppos)
31154 {
31155 unsigned long p = *ppos;
31156- ssize_t low_count, read, sz;
31157+ ssize_t low_count, read, sz, err = 0;
31158 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31159- int err = 0;
31160
31161 read = 0;
31162 if (p < (unsigned long) high_memory) {
31163@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31164 }
31165 #endif
31166 while (low_count > 0) {
31167+ char *temp;
31168+
31169 sz = size_inside_page(p, low_count);
31170
31171 /*
31172@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31173 */
31174 kbuf = xlate_dev_kmem_ptr((char *)p);
31175
31176- if (copy_to_user(buf, kbuf, sz))
31177+#ifdef CONFIG_PAX_USERCOPY
31178+ temp = kmalloc(sz, GFP_KERNEL);
31179+ if (!temp)
31180+ return -ENOMEM;
31181+ memcpy(temp, kbuf, sz);
31182+#else
31183+ temp = kbuf;
31184+#endif
31185+
31186+ err = copy_to_user(buf, temp, sz);
31187+
31188+#ifdef CONFIG_PAX_USERCOPY
31189+ kfree(temp);
31190+#endif
31191+
31192+ if (err)
31193 return -EFAULT;
31194 buf += sz;
31195 p += sz;
31196@@ -889,6 +941,9 @@ static const struct memdev {
31197 #ifdef CONFIG_CRASH_DUMP
31198 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31199 #endif
31200+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31201+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31202+#endif
31203 };
31204
31205 static int memory_open(struct inode *inode, struct file *filp)
31206diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31207index 674b3ab..a8d1970 100644
31208--- a/drivers/char/pcmcia/ipwireless/tty.c
31209+++ b/drivers/char/pcmcia/ipwireless/tty.c
31210@@ -29,6 +29,7 @@
31211 #include <linux/tty_driver.h>
31212 #include <linux/tty_flip.h>
31213 #include <linux/uaccess.h>
31214+#include <asm/local.h>
31215
31216 #include "tty.h"
31217 #include "network.h"
31218@@ -51,7 +52,7 @@ struct ipw_tty {
31219 int tty_type;
31220 struct ipw_network *network;
31221 struct tty_struct *linux_tty;
31222- int open_count;
31223+ local_t open_count;
31224 unsigned int control_lines;
31225 struct mutex ipw_tty_mutex;
31226 int tx_bytes_queued;
31227@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31228 mutex_unlock(&tty->ipw_tty_mutex);
31229 return -ENODEV;
31230 }
31231- if (tty->open_count == 0)
31232+ if (local_read(&tty->open_count) == 0)
31233 tty->tx_bytes_queued = 0;
31234
31235- tty->open_count++;
31236+ local_inc(&tty->open_count);
31237
31238 tty->linux_tty = linux_tty;
31239 linux_tty->driver_data = tty;
31240@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31241
31242 static void do_ipw_close(struct ipw_tty *tty)
31243 {
31244- tty->open_count--;
31245-
31246- if (tty->open_count == 0) {
31247+ if (local_dec_return(&tty->open_count) == 0) {
31248 struct tty_struct *linux_tty = tty->linux_tty;
31249
31250 if (linux_tty != NULL) {
31251@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31252 return;
31253
31254 mutex_lock(&tty->ipw_tty_mutex);
31255- if (tty->open_count == 0) {
31256+ if (local_read(&tty->open_count) == 0) {
31257 mutex_unlock(&tty->ipw_tty_mutex);
31258 return;
31259 }
31260@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31261 return;
31262 }
31263
31264- if (!tty->open_count) {
31265+ if (!local_read(&tty->open_count)) {
31266 mutex_unlock(&tty->ipw_tty_mutex);
31267 return;
31268 }
31269@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31270 return -ENODEV;
31271
31272 mutex_lock(&tty->ipw_tty_mutex);
31273- if (!tty->open_count) {
31274+ if (!local_read(&tty->open_count)) {
31275 mutex_unlock(&tty->ipw_tty_mutex);
31276 return -EINVAL;
31277 }
31278@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31279 if (!tty)
31280 return -ENODEV;
31281
31282- if (!tty->open_count)
31283+ if (!local_read(&tty->open_count))
31284 return -EINVAL;
31285
31286 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31287@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31288 if (!tty)
31289 return 0;
31290
31291- if (!tty->open_count)
31292+ if (!local_read(&tty->open_count))
31293 return 0;
31294
31295 return tty->tx_bytes_queued;
31296@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31297 if (!tty)
31298 return -ENODEV;
31299
31300- if (!tty->open_count)
31301+ if (!local_read(&tty->open_count))
31302 return -EINVAL;
31303
31304 return get_control_lines(tty);
31305@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31306 if (!tty)
31307 return -ENODEV;
31308
31309- if (!tty->open_count)
31310+ if (!local_read(&tty->open_count))
31311 return -EINVAL;
31312
31313 return set_control_lines(tty, set, clear);
31314@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31315 if (!tty)
31316 return -ENODEV;
31317
31318- if (!tty->open_count)
31319+ if (!local_read(&tty->open_count))
31320 return -EINVAL;
31321
31322 /* FIXME: Exactly how is the tty object locked here .. */
31323@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31324 against a parallel ioctl etc */
31325 mutex_lock(&ttyj->ipw_tty_mutex);
31326 }
31327- while (ttyj->open_count)
31328+ while (local_read(&ttyj->open_count))
31329 do_ipw_close(ttyj);
31330 ipwireless_disassociate_network_ttys(network,
31331 ttyj->channel_idx);
31332diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31333index 62f282e..e45c45c 100644
31334--- a/drivers/char/pty.c
31335+++ b/drivers/char/pty.c
31336@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31337 register_sysctl_table(pty_root_table);
31338
31339 /* Now create the /dev/ptmx special device */
31340+ pax_open_kernel();
31341 tty_default_fops(&ptmx_fops);
31342- ptmx_fops.open = ptmx_open;
31343+ *(void **)&ptmx_fops.open = ptmx_open;
31344+ pax_close_kernel();
31345
31346 cdev_init(&ptmx_cdev, &ptmx_fops);
31347 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31348diff --git a/drivers/char/random.c b/drivers/char/random.c
31349index 3a19e2d..6ed09d3 100644
31350--- a/drivers/char/random.c
31351+++ b/drivers/char/random.c
31352@@ -254,8 +254,13 @@
31353 /*
31354 * Configuration information
31355 */
31356+#ifdef CONFIG_GRKERNSEC_RANDNET
31357+#define INPUT_POOL_WORDS 512
31358+#define OUTPUT_POOL_WORDS 128
31359+#else
31360 #define INPUT_POOL_WORDS 128
31361 #define OUTPUT_POOL_WORDS 32
31362+#endif
31363 #define SEC_XFER_SIZE 512
31364
31365 /*
31366@@ -292,10 +297,17 @@ static struct poolinfo {
31367 int poolwords;
31368 int tap1, tap2, tap3, tap4, tap5;
31369 } poolinfo_table[] = {
31370+#ifdef CONFIG_GRKERNSEC_RANDNET
31371+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31372+ { 512, 411, 308, 208, 104, 1 },
31373+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31374+ { 128, 103, 76, 51, 25, 1 },
31375+#else
31376 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31377 { 128, 103, 76, 51, 25, 1 },
31378 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31379 { 32, 26, 20, 14, 7, 1 },
31380+#endif
31381 #if 0
31382 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31383 { 2048, 1638, 1231, 819, 411, 1 },
31384@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31385 #include <linux/sysctl.h>
31386
31387 static int min_read_thresh = 8, min_write_thresh;
31388-static int max_read_thresh = INPUT_POOL_WORDS * 32;
31389+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31390 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31391 static char sysctl_bootid[16];
31392
31393diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31394index 0e29a23..0efc2c2 100644
31395--- a/drivers/char/rocket.c
31396+++ b/drivers/char/rocket.c
31397@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31398 struct rocket_ports tmp;
31399 int board;
31400
31401+ pax_track_stack();
31402+
31403 if (!retports)
31404 return -EFAULT;
31405 memset(&tmp, 0, sizeof (tmp));
31406diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31407index 8c262aa..4d3b058 100644
31408--- a/drivers/char/sonypi.c
31409+++ b/drivers/char/sonypi.c
31410@@ -55,6 +55,7 @@
31411 #include <asm/uaccess.h>
31412 #include <asm/io.h>
31413 #include <asm/system.h>
31414+#include <asm/local.h>
31415
31416 #include <linux/sonypi.h>
31417
31418@@ -491,7 +492,7 @@ static struct sonypi_device {
31419 spinlock_t fifo_lock;
31420 wait_queue_head_t fifo_proc_list;
31421 struct fasync_struct *fifo_async;
31422- int open_count;
31423+ local_t open_count;
31424 int model;
31425 struct input_dev *input_jog_dev;
31426 struct input_dev *input_key_dev;
31427@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31428 static int sonypi_misc_release(struct inode *inode, struct file *file)
31429 {
31430 mutex_lock(&sonypi_device.lock);
31431- sonypi_device.open_count--;
31432+ local_dec(&sonypi_device.open_count);
31433 mutex_unlock(&sonypi_device.lock);
31434 return 0;
31435 }
31436@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31437 lock_kernel();
31438 mutex_lock(&sonypi_device.lock);
31439 /* Flush input queue on first open */
31440- if (!sonypi_device.open_count)
31441+ if (!local_read(&sonypi_device.open_count))
31442 kfifo_reset(sonypi_device.fifo);
31443- sonypi_device.open_count++;
31444+ local_inc(&sonypi_device.open_count);
31445 mutex_unlock(&sonypi_device.lock);
31446 unlock_kernel();
31447 return 0;
31448diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31449index db6dcfa..13834cb 100644
31450--- a/drivers/char/stallion.c
31451+++ b/drivers/char/stallion.c
31452@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31453 struct stlport stl_dummyport;
31454 struct stlport *portp;
31455
31456+ pax_track_stack();
31457+
31458 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31459 return -EFAULT;
31460 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31461diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31462index a0789f6..cea3902 100644
31463--- a/drivers/char/tpm/tpm.c
31464+++ b/drivers/char/tpm/tpm.c
31465@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31466 chip->vendor.req_complete_val)
31467 goto out_recv;
31468
31469- if ((status == chip->vendor.req_canceled)) {
31470+ if (status == chip->vendor.req_canceled) {
31471 dev_err(chip->dev, "Operation Canceled\n");
31472 rc = -ECANCELED;
31473 goto out;
31474@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31475
31476 struct tpm_chip *chip = dev_get_drvdata(dev);
31477
31478+ pax_track_stack();
31479+
31480 tpm_cmd.header.in = tpm_readpubek_header;
31481 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31482 "attempting to read the PUBEK");
31483diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31484index bf2170f..ce8cab9 100644
31485--- a/drivers/char/tpm/tpm_bios.c
31486+++ b/drivers/char/tpm/tpm_bios.c
31487@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31488 event = addr;
31489
31490 if ((event->event_type == 0 && event->event_size == 0) ||
31491- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31492+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31493 return NULL;
31494
31495 return addr;
31496@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31497 return NULL;
31498
31499 if ((event->event_type == 0 && event->event_size == 0) ||
31500- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31501+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31502 return NULL;
31503
31504 (*pos)++;
31505@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31506 int i;
31507
31508 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31509- seq_putc(m, data[i]);
31510+ if (!seq_putc(m, data[i]))
31511+ return -EFAULT;
31512
31513 return 0;
31514 }
31515@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31516 log->bios_event_log_end = log->bios_event_log + len;
31517
31518 virt = acpi_os_map_memory(start, len);
31519+ if (!virt) {
31520+ kfree(log->bios_event_log);
31521+ log->bios_event_log = NULL;
31522+ return -EFAULT;
31523+ }
31524
31525- memcpy(log->bios_event_log, virt, len);
31526+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31527
31528 acpi_os_unmap_memory(virt, len);
31529 return 0;
31530diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31531index 123cedf..137edef 100644
31532--- a/drivers/char/tty_io.c
31533+++ b/drivers/char/tty_io.c
31534@@ -1774,6 +1774,7 @@ got_driver:
31535
31536 if (IS_ERR(tty)) {
31537 mutex_unlock(&tty_mutex);
31538+ tty_driver_kref_put(driver);
31539 return PTR_ERR(tty);
31540 }
31541 }
31542@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31543 return retval;
31544 }
31545
31546+EXPORT_SYMBOL(tty_ioctl);
31547+
31548 #ifdef CONFIG_COMPAT
31549-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31550+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31551 unsigned long arg)
31552 {
31553 struct inode *inode = file->f_dentry->d_inode;
31554@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31555
31556 return retval;
31557 }
31558+
31559+EXPORT_SYMBOL(tty_compat_ioctl);
31560 #endif
31561
31562 /*
31563@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31564
31565 void tty_default_fops(struct file_operations *fops)
31566 {
31567- *fops = tty_fops;
31568+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31569 }
31570
31571 /*
31572diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31573index d814a3d..b55b9c9 100644
31574--- a/drivers/char/tty_ldisc.c
31575+++ b/drivers/char/tty_ldisc.c
31576@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31577 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31578 struct tty_ldisc_ops *ldo = ld->ops;
31579
31580- ldo->refcount--;
31581+ atomic_dec(&ldo->refcount);
31582 module_put(ldo->owner);
31583 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31584
31585@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31586 spin_lock_irqsave(&tty_ldisc_lock, flags);
31587 tty_ldiscs[disc] = new_ldisc;
31588 new_ldisc->num = disc;
31589- new_ldisc->refcount = 0;
31590+ atomic_set(&new_ldisc->refcount, 0);
31591 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31592
31593 return ret;
31594@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31595 return -EINVAL;
31596
31597 spin_lock_irqsave(&tty_ldisc_lock, flags);
31598- if (tty_ldiscs[disc]->refcount)
31599+ if (atomic_read(&tty_ldiscs[disc]->refcount))
31600 ret = -EBUSY;
31601 else
31602 tty_ldiscs[disc] = NULL;
31603@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31604 if (ldops) {
31605 ret = ERR_PTR(-EAGAIN);
31606 if (try_module_get(ldops->owner)) {
31607- ldops->refcount++;
31608+ atomic_inc(&ldops->refcount);
31609 ret = ldops;
31610 }
31611 }
31612@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31613 unsigned long flags;
31614
31615 spin_lock_irqsave(&tty_ldisc_lock, flags);
31616- ldops->refcount--;
31617+ atomic_dec(&ldops->refcount);
31618 module_put(ldops->owner);
31619 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31620 }
31621diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31622index a035ae3..c27fe2c 100644
31623--- a/drivers/char/virtio_console.c
31624+++ b/drivers/char/virtio_console.c
31625@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31626 * virtqueue, so we let the drivers do some boutique early-output thing. */
31627 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31628 {
31629- virtio_cons.put_chars = put_chars;
31630+ pax_open_kernel();
31631+ *(void **)&virtio_cons.put_chars = put_chars;
31632+ pax_close_kernel();
31633 return hvc_instantiate(0, 0, &virtio_cons);
31634 }
31635
31636@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31637 out_vq = vqs[1];
31638
31639 /* Start using the new console output. */
31640- virtio_cons.get_chars = get_chars;
31641- virtio_cons.put_chars = put_chars;
31642- virtio_cons.notifier_add = notifier_add_vio;
31643- virtio_cons.notifier_del = notifier_del_vio;
31644- virtio_cons.notifier_hangup = notifier_del_vio;
31645+ pax_open_kernel();
31646+ *(void **)&virtio_cons.get_chars = get_chars;
31647+ *(void **)&virtio_cons.put_chars = put_chars;
31648+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31649+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31650+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31651+ pax_close_kernel();
31652
31653 /* The first argument of hvc_alloc() is the virtual console number, so
31654 * we use zero. The second argument is the parameter for the
31655diff --git a/drivers/char/vt.c b/drivers/char/vt.c
31656index 0c80c68..53d59c1 100644
31657--- a/drivers/char/vt.c
31658+++ b/drivers/char/vt.c
31659@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
31660
31661 static void notify_write(struct vc_data *vc, unsigned int unicode)
31662 {
31663- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
31664+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
31665 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
31666 }
31667
31668diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
31669index 6351a26..999af95 100644
31670--- a/drivers/char/vt_ioctl.c
31671+++ b/drivers/char/vt_ioctl.c
31672@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31673 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
31674 return -EFAULT;
31675
31676- if (!capable(CAP_SYS_TTY_CONFIG))
31677- perm = 0;
31678-
31679 switch (cmd) {
31680 case KDGKBENT:
31681 key_map = key_maps[s];
31682@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31683 val = (i ? K_HOLE : K_NOSUCHMAP);
31684 return put_user(val, &user_kbe->kb_value);
31685 case KDSKBENT:
31686+ if (!capable(CAP_SYS_TTY_CONFIG))
31687+ perm = 0;
31688+
31689 if (!perm)
31690 return -EPERM;
31691+
31692 if (!i && v == K_NOSUCHMAP) {
31693 /* deallocate map */
31694 key_map = key_maps[s];
31695@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31696 int i, j, k;
31697 int ret;
31698
31699- if (!capable(CAP_SYS_TTY_CONFIG))
31700- perm = 0;
31701-
31702 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
31703 if (!kbs) {
31704 ret = -ENOMEM;
31705@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31706 kfree(kbs);
31707 return ((p && *p) ? -EOVERFLOW : 0);
31708 case KDSKBSENT:
31709+ if (!capable(CAP_SYS_TTY_CONFIG))
31710+ perm = 0;
31711+
31712 if (!perm) {
31713 ret = -EPERM;
31714 goto reterr;
31715diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
31716index c7ae026..1769c1d 100644
31717--- a/drivers/cpufreq/cpufreq.c
31718+++ b/drivers/cpufreq/cpufreq.c
31719@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
31720 complete(&policy->kobj_unregister);
31721 }
31722
31723-static struct sysfs_ops sysfs_ops = {
31724+static const struct sysfs_ops sysfs_ops = {
31725 .show = show,
31726 .store = store,
31727 };
31728diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
31729index 97b0038..2056670 100644
31730--- a/drivers/cpuidle/sysfs.c
31731+++ b/drivers/cpuidle/sysfs.c
31732@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
31733 return ret;
31734 }
31735
31736-static struct sysfs_ops cpuidle_sysfs_ops = {
31737+static const struct sysfs_ops cpuidle_sysfs_ops = {
31738 .show = cpuidle_show,
31739 .store = cpuidle_store,
31740 };
31741@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
31742 return ret;
31743 }
31744
31745-static struct sysfs_ops cpuidle_state_sysfs_ops = {
31746+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
31747 .show = cpuidle_state_show,
31748 };
31749
31750@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
31751 .release = cpuidle_state_sysfs_release,
31752 };
31753
31754-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31755+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31756 {
31757 kobject_put(&device->kobjs[i]->kobj);
31758 wait_for_completion(&device->kobjs[i]->kobj_unregister);
31759diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
31760index 5f753fc..0377ae9 100644
31761--- a/drivers/crypto/hifn_795x.c
31762+++ b/drivers/crypto/hifn_795x.c
31763@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
31764 0xCA, 0x34, 0x2B, 0x2E};
31765 struct scatterlist sg;
31766
31767+ pax_track_stack();
31768+
31769 memset(src, 0, sizeof(src));
31770 memset(ctx.key, 0, sizeof(ctx.key));
31771
31772diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
31773index 71e6482..de8d96c 100644
31774--- a/drivers/crypto/padlock-aes.c
31775+++ b/drivers/crypto/padlock-aes.c
31776@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
31777 struct crypto_aes_ctx gen_aes;
31778 int cpu;
31779
31780+ pax_track_stack();
31781+
31782 if (key_len % 8) {
31783 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
31784 return -EINVAL;
31785diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
31786index dcc4ab7..cc834bb 100644
31787--- a/drivers/dma/ioat/dma.c
31788+++ b/drivers/dma/ioat/dma.c
31789@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
31790 return entry->show(&chan->common, page);
31791 }
31792
31793-struct sysfs_ops ioat_sysfs_ops = {
31794+const struct sysfs_ops ioat_sysfs_ops = {
31795 .show = ioat_attr_show,
31796 };
31797
31798diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
31799index bbc3e78..f2db62c 100644
31800--- a/drivers/dma/ioat/dma.h
31801+++ b/drivers/dma/ioat/dma.h
31802@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
31803 unsigned long *phys_complete);
31804 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
31805 void ioat_kobject_del(struct ioatdma_device *device);
31806-extern struct sysfs_ops ioat_sysfs_ops;
31807+extern const struct sysfs_ops ioat_sysfs_ops;
31808 extern struct ioat_sysfs_entry ioat_version_attr;
31809 extern struct ioat_sysfs_entry ioat_cap_attr;
31810 #endif /* IOATDMA_H */
31811diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
31812index 9908c9e..3ceb0e5 100644
31813--- a/drivers/dma/ioat/dma_v3.c
31814+++ b/drivers/dma/ioat/dma_v3.c
31815@@ -71,10 +71,10 @@
31816 /* provide a lookup table for setting the source address in the base or
31817 * extended descriptor of an xor or pq descriptor
31818 */
31819-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
31820-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
31821-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
31822-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
31823+static const u8 xor_idx_to_desc = 0xd0;
31824+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
31825+static const u8 pq_idx_to_desc = 0xf8;
31826+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
31827
31828 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
31829 {
31830diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
31831index 85c464a..afd1e73 100644
31832--- a/drivers/edac/amd64_edac.c
31833+++ b/drivers/edac/amd64_edac.c
31834@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
31835 * PCI core identifies what devices are on a system during boot, and then
31836 * inquiry this table to see if this driver is for a given device found.
31837 */
31838-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
31839+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
31840 {
31841 .vendor = PCI_VENDOR_ID_AMD,
31842 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
31843diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
31844index 2b95f1a..4f52793 100644
31845--- a/drivers/edac/amd76x_edac.c
31846+++ b/drivers/edac/amd76x_edac.c
31847@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
31848 edac_mc_free(mci);
31849 }
31850
31851-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
31852+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
31853 {
31854 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31855 AMD762},
31856diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
31857index d205d49..74c9672 100644
31858--- a/drivers/edac/e752x_edac.c
31859+++ b/drivers/edac/e752x_edac.c
31860@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
31861 edac_mc_free(mci);
31862 }
31863
31864-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
31865+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
31866 {
31867 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31868 E7520},
31869diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
31870index c7d11cc..c59c1ca 100644
31871--- a/drivers/edac/e7xxx_edac.c
31872+++ b/drivers/edac/e7xxx_edac.c
31873@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
31874 edac_mc_free(mci);
31875 }
31876
31877-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
31878+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
31879 {
31880 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31881 E7205},
31882diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
31883index 5376457..5fdedbc 100644
31884--- a/drivers/edac/edac_device_sysfs.c
31885+++ b/drivers/edac/edac_device_sysfs.c
31886@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
31887 }
31888
31889 /* edac_dev file operations for an 'ctl_info' */
31890-static struct sysfs_ops device_ctl_info_ops = {
31891+static const struct sysfs_ops device_ctl_info_ops = {
31892 .show = edac_dev_ctl_info_show,
31893 .store = edac_dev_ctl_info_store
31894 };
31895@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
31896 }
31897
31898 /* edac_dev file operations for an 'instance' */
31899-static struct sysfs_ops device_instance_ops = {
31900+static const struct sysfs_ops device_instance_ops = {
31901 .show = edac_dev_instance_show,
31902 .store = edac_dev_instance_store
31903 };
31904@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
31905 }
31906
31907 /* edac_dev file operations for a 'block' */
31908-static struct sysfs_ops device_block_ops = {
31909+static const struct sysfs_ops device_block_ops = {
31910 .show = edac_dev_block_show,
31911 .store = edac_dev_block_store
31912 };
31913diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
31914index e1d4ce0..88840e9 100644
31915--- a/drivers/edac/edac_mc_sysfs.c
31916+++ b/drivers/edac/edac_mc_sysfs.c
31917@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
31918 return -EIO;
31919 }
31920
31921-static struct sysfs_ops csrowfs_ops = {
31922+static const struct sysfs_ops csrowfs_ops = {
31923 .show = csrowdev_show,
31924 .store = csrowdev_store
31925 };
31926@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
31927 }
31928
31929 /* Intermediate show/store table */
31930-static struct sysfs_ops mci_ops = {
31931+static const struct sysfs_ops mci_ops = {
31932 .show = mcidev_show,
31933 .store = mcidev_store
31934 };
31935diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
31936index 422728c..d8d9c88 100644
31937--- a/drivers/edac/edac_pci_sysfs.c
31938+++ b/drivers/edac/edac_pci_sysfs.c
31939@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
31940 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
31941 static int edac_pci_poll_msec = 1000; /* one second workq period */
31942
31943-static atomic_t pci_parity_count = ATOMIC_INIT(0);
31944-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
31945+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
31946+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
31947
31948 static struct kobject *edac_pci_top_main_kobj;
31949 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
31950@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
31951 }
31952
31953 /* fs_ops table */
31954-static struct sysfs_ops pci_instance_ops = {
31955+static const struct sysfs_ops pci_instance_ops = {
31956 .show = edac_pci_instance_show,
31957 .store = edac_pci_instance_store
31958 };
31959@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
31960 return -EIO;
31961 }
31962
31963-static struct sysfs_ops edac_pci_sysfs_ops = {
31964+static const struct sysfs_ops edac_pci_sysfs_ops = {
31965 .show = edac_pci_dev_show,
31966 .store = edac_pci_dev_store
31967 };
31968@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31969 edac_printk(KERN_CRIT, EDAC_PCI,
31970 "Signaled System Error on %s\n",
31971 pci_name(dev));
31972- atomic_inc(&pci_nonparity_count);
31973+ atomic_inc_unchecked(&pci_nonparity_count);
31974 }
31975
31976 if (status & (PCI_STATUS_PARITY)) {
31977@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31978 "Master Data Parity Error on %s\n",
31979 pci_name(dev));
31980
31981- atomic_inc(&pci_parity_count);
31982+ atomic_inc_unchecked(&pci_parity_count);
31983 }
31984
31985 if (status & (PCI_STATUS_DETECTED_PARITY)) {
31986@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31987 "Detected Parity Error on %s\n",
31988 pci_name(dev));
31989
31990- atomic_inc(&pci_parity_count);
31991+ atomic_inc_unchecked(&pci_parity_count);
31992 }
31993 }
31994
31995@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
31996 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
31997 "Signaled System Error on %s\n",
31998 pci_name(dev));
31999- atomic_inc(&pci_nonparity_count);
32000+ atomic_inc_unchecked(&pci_nonparity_count);
32001 }
32002
32003 if (status & (PCI_STATUS_PARITY)) {
32004@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32005 "Master Data Parity Error on "
32006 "%s\n", pci_name(dev));
32007
32008- atomic_inc(&pci_parity_count);
32009+ atomic_inc_unchecked(&pci_parity_count);
32010 }
32011
32012 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32013@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32014 "Detected Parity Error on %s\n",
32015 pci_name(dev));
32016
32017- atomic_inc(&pci_parity_count);
32018+ atomic_inc_unchecked(&pci_parity_count);
32019 }
32020 }
32021 }
32022@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32023 if (!check_pci_errors)
32024 return;
32025
32026- before_count = atomic_read(&pci_parity_count);
32027+ before_count = atomic_read_unchecked(&pci_parity_count);
32028
32029 /* scan all PCI devices looking for a Parity Error on devices and
32030 * bridges.
32031@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32032 /* Only if operator has selected panic on PCI Error */
32033 if (edac_pci_get_panic_on_pe()) {
32034 /* If the count is different 'after' from 'before' */
32035- if (before_count != atomic_read(&pci_parity_count))
32036+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32037 panic("EDAC: PCI Parity Error");
32038 }
32039 }
32040diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32041index 6c9a0f2..9c1cf7e 100644
32042--- a/drivers/edac/i3000_edac.c
32043+++ b/drivers/edac/i3000_edac.c
32044@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32045 edac_mc_free(mci);
32046 }
32047
32048-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32049+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32050 {
32051 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32052 I3000},
32053diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32054index fde4db9..fe108f9 100644
32055--- a/drivers/edac/i3200_edac.c
32056+++ b/drivers/edac/i3200_edac.c
32057@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32058 edac_mc_free(mci);
32059 }
32060
32061-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32062+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32063 {
32064 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32065 I3200},
32066diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32067index adc10a2..57d4ccf 100644
32068--- a/drivers/edac/i5000_edac.c
32069+++ b/drivers/edac/i5000_edac.c
32070@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32071 *
32072 * The "E500P" device is the first device supported.
32073 */
32074-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32075+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32076 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32077 .driver_data = I5000P},
32078
32079diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32080index 22db05a..b2b5503 100644
32081--- a/drivers/edac/i5100_edac.c
32082+++ b/drivers/edac/i5100_edac.c
32083@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32084 edac_mc_free(mci);
32085 }
32086
32087-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32088+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32089 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32090 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32091 { 0, }
32092diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32093index f99d106..f050710 100644
32094--- a/drivers/edac/i5400_edac.c
32095+++ b/drivers/edac/i5400_edac.c
32096@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32097 *
32098 * The "E500P" device is the first device supported.
32099 */
32100-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32101+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32102 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32103 {0,} /* 0 terminated list. */
32104 };
32105diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32106index 577760a..9ce16ce 100644
32107--- a/drivers/edac/i82443bxgx_edac.c
32108+++ b/drivers/edac/i82443bxgx_edac.c
32109@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32110
32111 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32112
32113-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32114+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32115 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32116 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32117 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32118diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32119index c0088ba..64a7b98 100644
32120--- a/drivers/edac/i82860_edac.c
32121+++ b/drivers/edac/i82860_edac.c
32122@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32123 edac_mc_free(mci);
32124 }
32125
32126-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32127+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32128 {
32129 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32130 I82860},
32131diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32132index b2d83b9..a34357b 100644
32133--- a/drivers/edac/i82875p_edac.c
32134+++ b/drivers/edac/i82875p_edac.c
32135@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32136 edac_mc_free(mci);
32137 }
32138
32139-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32140+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32141 {
32142 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32143 I82875P},
32144diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32145index 2eed3ea..87bbbd1 100644
32146--- a/drivers/edac/i82975x_edac.c
32147+++ b/drivers/edac/i82975x_edac.c
32148@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32149 edac_mc_free(mci);
32150 }
32151
32152-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32153+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32154 {
32155 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32156 I82975X
32157diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32158index 9900675..78ac2b6 100644
32159--- a/drivers/edac/r82600_edac.c
32160+++ b/drivers/edac/r82600_edac.c
32161@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32162 edac_mc_free(mci);
32163 }
32164
32165-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32166+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32167 {
32168 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32169 },
32170diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32171index d4ec605..4cfec4e 100644
32172--- a/drivers/edac/x38_edac.c
32173+++ b/drivers/edac/x38_edac.c
32174@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32175 edac_mc_free(mci);
32176 }
32177
32178-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32179+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32180 {
32181 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32182 X38},
32183diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32184index 3fc2ceb..daf098f 100644
32185--- a/drivers/firewire/core-card.c
32186+++ b/drivers/firewire/core-card.c
32187@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32188
32189 void fw_core_remove_card(struct fw_card *card)
32190 {
32191- struct fw_card_driver dummy_driver = dummy_driver_template;
32192+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32193
32194 card->driver->update_phy_reg(card, 4,
32195 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32196diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32197index 4560d8f..36db24a 100644
32198--- a/drivers/firewire/core-cdev.c
32199+++ b/drivers/firewire/core-cdev.c
32200@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32201 int ret;
32202
32203 if ((request->channels == 0 && request->bandwidth == 0) ||
32204- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32205- request->bandwidth < 0)
32206+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32207 return -EINVAL;
32208
32209 r = kmalloc(sizeof(*r), GFP_KERNEL);
32210diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32211index da628c7..cf54a2c 100644
32212--- a/drivers/firewire/core-transaction.c
32213+++ b/drivers/firewire/core-transaction.c
32214@@ -36,6 +36,7 @@
32215 #include <linux/string.h>
32216 #include <linux/timer.h>
32217 #include <linux/types.h>
32218+#include <linux/sched.h>
32219
32220 #include <asm/byteorder.h>
32221
32222@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32223 struct transaction_callback_data d;
32224 struct fw_transaction t;
32225
32226+ pax_track_stack();
32227+
32228 init_completion(&d.done);
32229 d.payload = payload;
32230 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32231diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32232index 7ff6e75..a2965d9 100644
32233--- a/drivers/firewire/core.h
32234+++ b/drivers/firewire/core.h
32235@@ -86,6 +86,7 @@ struct fw_card_driver {
32236
32237 int (*stop_iso)(struct fw_iso_context *ctx);
32238 };
32239+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32240
32241 void fw_card_initialize(struct fw_card *card,
32242 const struct fw_card_driver *driver, struct device *device);
32243diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32244index 3a2ccb0..82fd7c4 100644
32245--- a/drivers/firmware/dmi_scan.c
32246+++ b/drivers/firmware/dmi_scan.c
32247@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32248 }
32249 }
32250 else {
32251- /*
32252- * no iounmap() for that ioremap(); it would be a no-op, but
32253- * it's so early in setup that sucker gets confused into doing
32254- * what it shouldn't if we actually call it.
32255- */
32256 p = dmi_ioremap(0xF0000, 0x10000);
32257 if (p == NULL)
32258 goto error;
32259@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32260 if (buf == NULL)
32261 return -1;
32262
32263- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32264+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32265
32266 iounmap(buf);
32267 return 0;
32268diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32269index 9e4f59d..110e24e 100644
32270--- a/drivers/firmware/edd.c
32271+++ b/drivers/firmware/edd.c
32272@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32273 return ret;
32274 }
32275
32276-static struct sysfs_ops edd_attr_ops = {
32277+static const struct sysfs_ops edd_attr_ops = {
32278 .show = edd_attr_show,
32279 };
32280
32281diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32282index f4f709d..082f06e 100644
32283--- a/drivers/firmware/efivars.c
32284+++ b/drivers/firmware/efivars.c
32285@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32286 return ret;
32287 }
32288
32289-static struct sysfs_ops efivar_attr_ops = {
32290+static const struct sysfs_ops efivar_attr_ops = {
32291 .show = efivar_attr_show,
32292 .store = efivar_attr_store,
32293 };
32294diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32295index 051d1eb..0a5d4e7 100644
32296--- a/drivers/firmware/iscsi_ibft.c
32297+++ b/drivers/firmware/iscsi_ibft.c
32298@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32299 return ret;
32300 }
32301
32302-static struct sysfs_ops ibft_attr_ops = {
32303+static const struct sysfs_ops ibft_attr_ops = {
32304 .show = ibft_show_attribute,
32305 };
32306
32307diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32308index 56f9234..8c58c7b 100644
32309--- a/drivers/firmware/memmap.c
32310+++ b/drivers/firmware/memmap.c
32311@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32312 NULL
32313 };
32314
32315-static struct sysfs_ops memmap_attr_ops = {
32316+static const struct sysfs_ops memmap_attr_ops = {
32317 .show = memmap_attr_show,
32318 };
32319
32320diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32321index b16c9a8..2af7d3f 100644
32322--- a/drivers/gpio/vr41xx_giu.c
32323+++ b/drivers/gpio/vr41xx_giu.c
32324@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32325 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32326 maskl, pendl, maskh, pendh);
32327
32328- atomic_inc(&irq_err_count);
32329+ atomic_inc_unchecked(&irq_err_count);
32330
32331 return -EINVAL;
32332 }
32333diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32334index bea6efc..3dc0f42 100644
32335--- a/drivers/gpu/drm/drm_crtc.c
32336+++ b/drivers/gpu/drm/drm_crtc.c
32337@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32338 */
32339 if ((out_resp->count_modes >= mode_count) && mode_count) {
32340 copied = 0;
32341- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32342+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32343 list_for_each_entry(mode, &connector->modes, head) {
32344 drm_crtc_convert_to_umode(&u_mode, mode);
32345 if (copy_to_user(mode_ptr + copied,
32346@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32347
32348 if ((out_resp->count_props >= props_count) && props_count) {
32349 copied = 0;
32350- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32351- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32352+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32353+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32354 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32355 if (connector->property_ids[i] != 0) {
32356 if (put_user(connector->property_ids[i],
32357@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32358
32359 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32360 copied = 0;
32361- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32362+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32363 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32364 if (connector->encoder_ids[i] != 0) {
32365 if (put_user(connector->encoder_ids[i],
32366@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32367 }
32368
32369 for (i = 0; i < crtc_req->count_connectors; i++) {
32370- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32371+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32372 if (get_user(out_id, &set_connectors_ptr[i])) {
32373 ret = -EFAULT;
32374 goto out;
32375@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32376 out_resp->flags = property->flags;
32377
32378 if ((out_resp->count_values >= value_count) && value_count) {
32379- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32380+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32381 for (i = 0; i < value_count; i++) {
32382 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32383 ret = -EFAULT;
32384@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32385 if (property->flags & DRM_MODE_PROP_ENUM) {
32386 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32387 copied = 0;
32388- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32389+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32390 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32391
32392 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32393@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32394 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32395 copied = 0;
32396 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32397- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32398+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32399
32400 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32401 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32402@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32403 blob = obj_to_blob(obj);
32404
32405 if (out_resp->length == blob->length) {
32406- blob_ptr = (void *)(unsigned long)out_resp->data;
32407+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
32408 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32409 ret = -EFAULT;
32410 goto done;
32411diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32412index 1b8745d..92fdbf6 100644
32413--- a/drivers/gpu/drm/drm_crtc_helper.c
32414+++ b/drivers/gpu/drm/drm_crtc_helper.c
32415@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32416 struct drm_crtc *tmp;
32417 int crtc_mask = 1;
32418
32419- WARN(!crtc, "checking null crtc?");
32420+ BUG_ON(!crtc);
32421
32422 dev = crtc->dev;
32423
32424@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32425
32426 adjusted_mode = drm_mode_duplicate(dev, mode);
32427
32428+ pax_track_stack();
32429+
32430 crtc->enabled = drm_helper_crtc_in_use(crtc);
32431
32432 if (!crtc->enabled)
32433diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32434index 0e27d98..dec8768 100644
32435--- a/drivers/gpu/drm/drm_drv.c
32436+++ b/drivers/gpu/drm/drm_drv.c
32437@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32438 char *kdata = NULL;
32439
32440 atomic_inc(&dev->ioctl_count);
32441- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32442+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32443 ++file_priv->ioctl_count;
32444
32445 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32446diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32447index ba14553..182d0bb 100644
32448--- a/drivers/gpu/drm/drm_fops.c
32449+++ b/drivers/gpu/drm/drm_fops.c
32450@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32451 }
32452
32453 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32454- atomic_set(&dev->counts[i], 0);
32455+ atomic_set_unchecked(&dev->counts[i], 0);
32456
32457 dev->sigdata.lock = NULL;
32458
32459@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32460
32461 retcode = drm_open_helper(inode, filp, dev);
32462 if (!retcode) {
32463- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32464+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32465 spin_lock(&dev->count_lock);
32466- if (!dev->open_count++) {
32467+ if (local_inc_return(&dev->open_count) == 1) {
32468 spin_unlock(&dev->count_lock);
32469 retcode = drm_setup(dev);
32470 goto out;
32471@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32472
32473 lock_kernel();
32474
32475- DRM_DEBUG("open_count = %d\n", dev->open_count);
32476+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32477
32478 if (dev->driver->preclose)
32479 dev->driver->preclose(dev, file_priv);
32480@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32481 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32482 task_pid_nr(current),
32483 (long)old_encode_dev(file_priv->minor->device),
32484- dev->open_count);
32485+ local_read(&dev->open_count));
32486
32487 /* if the master has gone away we can't do anything with the lock */
32488 if (file_priv->minor->master)
32489@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, struct file *filp)
32490 * End inline drm_release
32491 */
32492
32493- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32494+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32495 spin_lock(&dev->count_lock);
32496- if (!--dev->open_count) {
32497+ if (local_dec_and_test(&dev->open_count)) {
32498 if (atomic_read(&dev->ioctl_count)) {
32499 DRM_ERROR("Device busy: %d\n",
32500 atomic_read(&dev->ioctl_count));
32501diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32502index 8bf3770..7942280 100644
32503--- a/drivers/gpu/drm/drm_gem.c
32504+++ b/drivers/gpu/drm/drm_gem.c
32505@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32506 spin_lock_init(&dev->object_name_lock);
32507 idr_init(&dev->object_name_idr);
32508 atomic_set(&dev->object_count, 0);
32509- atomic_set(&dev->object_memory, 0);
32510+ atomic_set_unchecked(&dev->object_memory, 0);
32511 atomic_set(&dev->pin_count, 0);
32512- atomic_set(&dev->pin_memory, 0);
32513+ atomic_set_unchecked(&dev->pin_memory, 0);
32514 atomic_set(&dev->gtt_count, 0);
32515- atomic_set(&dev->gtt_memory, 0);
32516+ atomic_set_unchecked(&dev->gtt_memory, 0);
32517
32518 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32519 if (!mm) {
32520@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32521 goto fput;
32522 }
32523 atomic_inc(&dev->object_count);
32524- atomic_add(obj->size, &dev->object_memory);
32525+ atomic_add_unchecked(obj->size, &dev->object_memory);
32526 return obj;
32527 fput:
32528 fput(obj->filp);
32529@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32530
32531 fput(obj->filp);
32532 atomic_dec(&dev->object_count);
32533- atomic_sub(obj->size, &dev->object_memory);
32534+ atomic_sub_unchecked(obj->size, &dev->object_memory);
32535 kfree(obj);
32536 }
32537 EXPORT_SYMBOL(drm_gem_object_free);
32538diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32539index f0f6c6b..34af322 100644
32540--- a/drivers/gpu/drm/drm_info.c
32541+++ b/drivers/gpu/drm/drm_info.c
32542@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32543 struct drm_local_map *map;
32544 struct drm_map_list *r_list;
32545
32546- /* Hardcoded from _DRM_FRAME_BUFFER,
32547- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32548- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32549- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32550+ static const char * const types[] = {
32551+ [_DRM_FRAME_BUFFER] = "FB",
32552+ [_DRM_REGISTERS] = "REG",
32553+ [_DRM_SHM] = "SHM",
32554+ [_DRM_AGP] = "AGP",
32555+ [_DRM_SCATTER_GATHER] = "SG",
32556+ [_DRM_CONSISTENT] = "PCI",
32557+ [_DRM_GEM] = "GEM" };
32558 const char *type;
32559 int i;
32560
32561@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32562 map = r_list->map;
32563 if (!map)
32564 continue;
32565- if (map->type < 0 || map->type > 5)
32566+ if (map->type >= ARRAY_SIZE(types))
32567 type = "??";
32568 else
32569 type = types[map->type];
32570@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32571 struct drm_device *dev = node->minor->dev;
32572
32573 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32574- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32575+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32576 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32577- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32578- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32579+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32580+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32581 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32582 return 0;
32583 }
32584@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32585 mutex_lock(&dev->struct_mutex);
32586 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32587 atomic_read(&dev->vma_count),
32588+#ifdef CONFIG_GRKERNSEC_HIDESYM
32589+ NULL, 0);
32590+#else
32591 high_memory, (u64)virt_to_phys(high_memory));
32592+#endif
32593
32594 list_for_each_entry(pt, &dev->vmalist, head) {
32595 vma = pt->vma;
32596@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32597 continue;
32598 seq_printf(m,
32599 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32600- pt->pid, vma->vm_start, vma->vm_end,
32601+ pt->pid,
32602+#ifdef CONFIG_GRKERNSEC_HIDESYM
32603+ 0, 0,
32604+#else
32605+ vma->vm_start, vma->vm_end,
32606+#endif
32607 vma->vm_flags & VM_READ ? 'r' : '-',
32608 vma->vm_flags & VM_WRITE ? 'w' : '-',
32609 vma->vm_flags & VM_EXEC ? 'x' : '-',
32610 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32611 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32612 vma->vm_flags & VM_IO ? 'i' : '-',
32613+#ifdef CONFIG_GRKERNSEC_HIDESYM
32614+ 0);
32615+#else
32616 vma->vm_pgoff);
32617+#endif
32618
32619 #if defined(__i386__)
32620 pgprot = pgprot_val(vma->vm_page_prot);
32621diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32622index 282d9fd..71e5f11 100644
32623--- a/drivers/gpu/drm/drm_ioc32.c
32624+++ b/drivers/gpu/drm/drm_ioc32.c
32625@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32626 request = compat_alloc_user_space(nbytes);
32627 if (!access_ok(VERIFY_WRITE, request, nbytes))
32628 return -EFAULT;
32629- list = (struct drm_buf_desc *) (request + 1);
32630+ list = (struct drm_buf_desc __user *) (request + 1);
32631
32632 if (__put_user(count, &request->count)
32633 || __put_user(list, &request->list))
32634@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32635 request = compat_alloc_user_space(nbytes);
32636 if (!access_ok(VERIFY_WRITE, request, nbytes))
32637 return -EFAULT;
32638- list = (struct drm_buf_pub *) (request + 1);
32639+ list = (struct drm_buf_pub __user *) (request + 1);
32640
32641 if (__put_user(count, &request->count)
32642 || __put_user(list, &request->list))
32643diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32644index 9b9ff46..4ea724c 100644
32645--- a/drivers/gpu/drm/drm_ioctl.c
32646+++ b/drivers/gpu/drm/drm_ioctl.c
32647@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32648 stats->data[i].value =
32649 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32650 else
32651- stats->data[i].value = atomic_read(&dev->counts[i]);
32652+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32653 stats->data[i].type = dev->types[i];
32654 }
32655
32656diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32657index e2f70a5..c703e86 100644
32658--- a/drivers/gpu/drm/drm_lock.c
32659+++ b/drivers/gpu/drm/drm_lock.c
32660@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32661 if (drm_lock_take(&master->lock, lock->context)) {
32662 master->lock.file_priv = file_priv;
32663 master->lock.lock_time = jiffies;
32664- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32665+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32666 break; /* Got lock */
32667 }
32668
32669@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32670 return -EINVAL;
32671 }
32672
32673- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32674+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32675
32676 /* kernel_context_switch isn't used by any of the x86 drm
32677 * modules but is required by the Sparc driver.
32678diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32679index 7d1d88c..b9131b2 100644
32680--- a/drivers/gpu/drm/i810/i810_dma.c
32681+++ b/drivers/gpu/drm/i810/i810_dma.c
32682@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32683 dma->buflist[vertex->idx],
32684 vertex->discard, vertex->used);
32685
32686- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32687- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32688+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32689+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32690 sarea_priv->last_enqueue = dev_priv->counter - 1;
32691 sarea_priv->last_dispatch = (int)hw_status[5];
32692
32693@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32694 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32695 mc->last_render);
32696
32697- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32698- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32699+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32700+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32701 sarea_priv->last_enqueue = dev_priv->counter - 1;
32702 sarea_priv->last_dispatch = (int)hw_status[5];
32703
32704diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32705index 21e2691..7321edd 100644
32706--- a/drivers/gpu/drm/i810/i810_drv.h
32707+++ b/drivers/gpu/drm/i810/i810_drv.h
32708@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32709 int page_flipping;
32710
32711 wait_queue_head_t irq_queue;
32712- atomic_t irq_received;
32713- atomic_t irq_emitted;
32714+ atomic_unchecked_t irq_received;
32715+ atomic_unchecked_t irq_emitted;
32716
32717 int front_offset;
32718 } drm_i810_private_t;
32719diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
32720index da82afe..48a45de 100644
32721--- a/drivers/gpu/drm/i830/i830_drv.h
32722+++ b/drivers/gpu/drm/i830/i830_drv.h
32723@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
32724 int page_flipping;
32725
32726 wait_queue_head_t irq_queue;
32727- atomic_t irq_received;
32728- atomic_t irq_emitted;
32729+ atomic_unchecked_t irq_received;
32730+ atomic_unchecked_t irq_emitted;
32731
32732 int use_mi_batchbuffer_start;
32733
32734diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
32735index 91ec2bb..6f21fab 100644
32736--- a/drivers/gpu/drm/i830/i830_irq.c
32737+++ b/drivers/gpu/drm/i830/i830_irq.c
32738@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
32739
32740 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
32741
32742- atomic_inc(&dev_priv->irq_received);
32743+ atomic_inc_unchecked(&dev_priv->irq_received);
32744 wake_up_interruptible(&dev_priv->irq_queue);
32745
32746 return IRQ_HANDLED;
32747@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
32748
32749 DRM_DEBUG("%s\n", __func__);
32750
32751- atomic_inc(&dev_priv->irq_emitted);
32752+ atomic_inc_unchecked(&dev_priv->irq_emitted);
32753
32754 BEGIN_LP_RING(2);
32755 OUT_RING(0);
32756 OUT_RING(GFX_OP_USER_INTERRUPT);
32757 ADVANCE_LP_RING();
32758
32759- return atomic_read(&dev_priv->irq_emitted);
32760+ return atomic_read_unchecked(&dev_priv->irq_emitted);
32761 }
32762
32763 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32764@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32765
32766 DRM_DEBUG("%s\n", __func__);
32767
32768- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32769+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32770 return 0;
32771
32772 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
32773@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32774
32775 for (;;) {
32776 __set_current_state(TASK_INTERRUPTIBLE);
32777- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32778+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32779 break;
32780 if ((signed)(end - jiffies) <= 0) {
32781 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
32782@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
32783 I830_WRITE16(I830REG_HWSTAM, 0xffff);
32784 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
32785 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
32786- atomic_set(&dev_priv->irq_received, 0);
32787- atomic_set(&dev_priv->irq_emitted, 0);
32788+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32789+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
32790 init_waitqueue_head(&dev_priv->irq_queue);
32791 }
32792
32793diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
32794index 288fc50..c6092055 100644
32795--- a/drivers/gpu/drm/i915/dvo.h
32796+++ b/drivers/gpu/drm/i915/dvo.h
32797@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
32798 *
32799 * \return singly-linked list of modes or NULL if no modes found.
32800 */
32801- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
32802+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
32803
32804 /**
32805 * Clean up driver-specific bits of the output
32806 */
32807- void (*destroy) (struct intel_dvo_device *dvo);
32808+ void (* const destroy) (struct intel_dvo_device *dvo);
32809
32810 /**
32811 * Debugging hook to dump device registers to log file
32812 */
32813- void (*dump_regs)(struct intel_dvo_device *dvo);
32814+ void (* const dump_regs)(struct intel_dvo_device *dvo);
32815 };
32816
32817-extern struct intel_dvo_dev_ops sil164_ops;
32818-extern struct intel_dvo_dev_ops ch7xxx_ops;
32819-extern struct intel_dvo_dev_ops ivch_ops;
32820-extern struct intel_dvo_dev_ops tfp410_ops;
32821-extern struct intel_dvo_dev_ops ch7017_ops;
32822+extern const struct intel_dvo_dev_ops sil164_ops;
32823+extern const struct intel_dvo_dev_ops ch7xxx_ops;
32824+extern const struct intel_dvo_dev_ops ivch_ops;
32825+extern const struct intel_dvo_dev_ops tfp410_ops;
32826+extern const struct intel_dvo_dev_ops ch7017_ops;
32827
32828 #endif /* _INTEL_DVO_H */
32829diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
32830index 621815b..499d82e 100644
32831--- a/drivers/gpu/drm/i915/dvo_ch7017.c
32832+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
32833@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
32834 }
32835 }
32836
32837-struct intel_dvo_dev_ops ch7017_ops = {
32838+const struct intel_dvo_dev_ops ch7017_ops = {
32839 .init = ch7017_init,
32840 .detect = ch7017_detect,
32841 .mode_valid = ch7017_mode_valid,
32842diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
32843index a9b8962..ac769ba 100644
32844--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
32845+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
32846@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
32847 }
32848 }
32849
32850-struct intel_dvo_dev_ops ch7xxx_ops = {
32851+const struct intel_dvo_dev_ops ch7xxx_ops = {
32852 .init = ch7xxx_init,
32853 .detect = ch7xxx_detect,
32854 .mode_valid = ch7xxx_mode_valid,
32855diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
32856index aa176f9..ed2930c 100644
32857--- a/drivers/gpu/drm/i915/dvo_ivch.c
32858+++ b/drivers/gpu/drm/i915/dvo_ivch.c
32859@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
32860 }
32861 }
32862
32863-struct intel_dvo_dev_ops ivch_ops= {
32864+const struct intel_dvo_dev_ops ivch_ops= {
32865 .init = ivch_init,
32866 .dpms = ivch_dpms,
32867 .save = ivch_save,
32868diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
32869index e1c1f73..7dbebcf 100644
32870--- a/drivers/gpu/drm/i915/dvo_sil164.c
32871+++ b/drivers/gpu/drm/i915/dvo_sil164.c
32872@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
32873 }
32874 }
32875
32876-struct intel_dvo_dev_ops sil164_ops = {
32877+const struct intel_dvo_dev_ops sil164_ops = {
32878 .init = sil164_init,
32879 .detect = sil164_detect,
32880 .mode_valid = sil164_mode_valid,
32881diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
32882index 16dce84..7e1b6f8 100644
32883--- a/drivers/gpu/drm/i915/dvo_tfp410.c
32884+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
32885@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
32886 }
32887 }
32888
32889-struct intel_dvo_dev_ops tfp410_ops = {
32890+const struct intel_dvo_dev_ops tfp410_ops = {
32891 .init = tfp410_init,
32892 .detect = tfp410_detect,
32893 .mode_valid = tfp410_mode_valid,
32894diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
32895index 7e859d6..7d1cf2b 100644
32896--- a/drivers/gpu/drm/i915/i915_debugfs.c
32897+++ b/drivers/gpu/drm/i915/i915_debugfs.c
32898@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
32899 I915_READ(GTIMR));
32900 }
32901 seq_printf(m, "Interrupts received: %d\n",
32902- atomic_read(&dev_priv->irq_received));
32903+ atomic_read_unchecked(&dev_priv->irq_received));
32904 if (dev_priv->hw_status_page != NULL) {
32905 seq_printf(m, "Current sequence: %d\n",
32906 i915_get_gem_seqno(dev));
32907diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
32908index 5449239..7e4f68d 100644
32909--- a/drivers/gpu/drm/i915/i915_drv.c
32910+++ b/drivers/gpu/drm/i915/i915_drv.c
32911@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
32912 return i915_resume(dev);
32913 }
32914
32915-static struct vm_operations_struct i915_gem_vm_ops = {
32916+static const struct vm_operations_struct i915_gem_vm_ops = {
32917 .fault = i915_gem_fault,
32918 .open = drm_gem_vm_open,
32919 .close = drm_gem_vm_close,
32920diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
32921index 97163f7..c24c7c7 100644
32922--- a/drivers/gpu/drm/i915/i915_drv.h
32923+++ b/drivers/gpu/drm/i915/i915_drv.h
32924@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
32925 /* display clock increase/decrease */
32926 /* pll clock increase/decrease */
32927 /* clock gating init */
32928-};
32929+} __no_const;
32930
32931 typedef struct drm_i915_private {
32932 struct drm_device *dev;
32933@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
32934 int page_flipping;
32935
32936 wait_queue_head_t irq_queue;
32937- atomic_t irq_received;
32938+ atomic_unchecked_t irq_received;
32939 /** Protects user_irq_refcount and irq_mask_reg */
32940 spinlock_t user_irq_lock;
32941 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
32942diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
32943index 27a3074..eb3f959 100644
32944--- a/drivers/gpu/drm/i915/i915_gem.c
32945+++ b/drivers/gpu/drm/i915/i915_gem.c
32946@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
32947
32948 args->aper_size = dev->gtt_total;
32949 args->aper_available_size = (args->aper_size -
32950- atomic_read(&dev->pin_memory));
32951+ atomic_read_unchecked(&dev->pin_memory));
32952
32953 return 0;
32954 }
32955@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
32956
32957 if (obj_priv->gtt_space) {
32958 atomic_dec(&dev->gtt_count);
32959- atomic_sub(obj->size, &dev->gtt_memory);
32960+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
32961
32962 drm_mm_put_block(obj_priv->gtt_space);
32963 obj_priv->gtt_space = NULL;
32964@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
32965 goto search_free;
32966 }
32967 atomic_inc(&dev->gtt_count);
32968- atomic_add(obj->size, &dev->gtt_memory);
32969+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
32970
32971 /* Assert that the object is not currently in any GPU domain. As it
32972 * wasn't in the GTT, there shouldn't be any way it could have been in
32973@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
32974 "%d/%d gtt bytes\n",
32975 atomic_read(&dev->object_count),
32976 atomic_read(&dev->pin_count),
32977- atomic_read(&dev->object_memory),
32978- atomic_read(&dev->pin_memory),
32979- atomic_read(&dev->gtt_memory),
32980+ atomic_read_unchecked(&dev->object_memory),
32981+ atomic_read_unchecked(&dev->pin_memory),
32982+ atomic_read_unchecked(&dev->gtt_memory),
32983 dev->gtt_total);
32984 }
32985 goto err;
32986@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
32987 */
32988 if (obj_priv->pin_count == 1) {
32989 atomic_inc(&dev->pin_count);
32990- atomic_add(obj->size, &dev->pin_memory);
32991+ atomic_add_unchecked(obj->size, &dev->pin_memory);
32992 if (!obj_priv->active &&
32993 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
32994 !list_empty(&obj_priv->list))
32995@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
32996 list_move_tail(&obj_priv->list,
32997 &dev_priv->mm.inactive_list);
32998 atomic_dec(&dev->pin_count);
32999- atomic_sub(obj->size, &dev->pin_memory);
33000+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
33001 }
33002 i915_verify_inactive(dev, __FILE__, __LINE__);
33003 }
33004diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33005index 63f28ad..f5469da 100644
33006--- a/drivers/gpu/drm/i915/i915_irq.c
33007+++ b/drivers/gpu/drm/i915/i915_irq.c
33008@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33009 int irq_received;
33010 int ret = IRQ_NONE;
33011
33012- atomic_inc(&dev_priv->irq_received);
33013+ atomic_inc_unchecked(&dev_priv->irq_received);
33014
33015 if (IS_IGDNG(dev))
33016 return igdng_irq_handler(dev);
33017@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33018 {
33019 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33020
33021- atomic_set(&dev_priv->irq_received, 0);
33022+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33023
33024 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33025 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33026diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33027index 5d9c6a7..d1b0e29 100644
33028--- a/drivers/gpu/drm/i915/intel_sdvo.c
33029+++ b/drivers/gpu/drm/i915/intel_sdvo.c
33030@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33031 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33032
33033 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33034- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33035+ pax_open_kernel();
33036+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33037+ pax_close_kernel();
33038
33039 /* Read the regs to test if we can talk to the device */
33040 for (i = 0; i < 0x40; i++) {
33041diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33042index be6c6b9..8615d9c 100644
33043--- a/drivers/gpu/drm/mga/mga_drv.h
33044+++ b/drivers/gpu/drm/mga/mga_drv.h
33045@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33046 u32 clear_cmd;
33047 u32 maccess;
33048
33049- atomic_t vbl_received; /**< Number of vblanks received. */
33050+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33051 wait_queue_head_t fence_queue;
33052- atomic_t last_fence_retired;
33053+ atomic_unchecked_t last_fence_retired;
33054 u32 next_fence_to_post;
33055
33056 unsigned int fb_cpp;
33057diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33058index daa6041..a28a5da 100644
33059--- a/drivers/gpu/drm/mga/mga_irq.c
33060+++ b/drivers/gpu/drm/mga/mga_irq.c
33061@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33062 if (crtc != 0)
33063 return 0;
33064
33065- return atomic_read(&dev_priv->vbl_received);
33066+ return atomic_read_unchecked(&dev_priv->vbl_received);
33067 }
33068
33069
33070@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33071 /* VBLANK interrupt */
33072 if (status & MGA_VLINEPEN) {
33073 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33074- atomic_inc(&dev_priv->vbl_received);
33075+ atomic_inc_unchecked(&dev_priv->vbl_received);
33076 drm_handle_vblank(dev, 0);
33077 handled = 1;
33078 }
33079@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33080 MGA_WRITE(MGA_PRIMEND, prim_end);
33081 }
33082
33083- atomic_inc(&dev_priv->last_fence_retired);
33084+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33085 DRM_WAKEUP(&dev_priv->fence_queue);
33086 handled = 1;
33087 }
33088@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33089 * using fences.
33090 */
33091 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33092- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33093+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33094 - *sequence) <= (1 << 23)));
33095
33096 *sequence = cur_fence;
33097diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33098index 4c39a40..b22a9ea 100644
33099--- a/drivers/gpu/drm/r128/r128_cce.c
33100+++ b/drivers/gpu/drm/r128/r128_cce.c
33101@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33102
33103 /* GH: Simple idle check.
33104 */
33105- atomic_set(&dev_priv->idle_count, 0);
33106+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33107
33108 /* We don't support anything other than bus-mastering ring mode,
33109 * but the ring can be in either AGP or PCI space for the ring
33110diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33111index 3c60829..4faf484 100644
33112--- a/drivers/gpu/drm/r128/r128_drv.h
33113+++ b/drivers/gpu/drm/r128/r128_drv.h
33114@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33115 int is_pci;
33116 unsigned long cce_buffers_offset;
33117
33118- atomic_t idle_count;
33119+ atomic_unchecked_t idle_count;
33120
33121 int page_flipping;
33122 int current_page;
33123 u32 crtc_offset;
33124 u32 crtc_offset_cntl;
33125
33126- atomic_t vbl_received;
33127+ atomic_unchecked_t vbl_received;
33128
33129 u32 color_fmt;
33130 unsigned int front_offset;
33131diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33132index 69810fb..97bf17a 100644
33133--- a/drivers/gpu/drm/r128/r128_irq.c
33134+++ b/drivers/gpu/drm/r128/r128_irq.c
33135@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33136 if (crtc != 0)
33137 return 0;
33138
33139- return atomic_read(&dev_priv->vbl_received);
33140+ return atomic_read_unchecked(&dev_priv->vbl_received);
33141 }
33142
33143 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33144@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33145 /* VBLANK interrupt */
33146 if (status & R128_CRTC_VBLANK_INT) {
33147 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33148- atomic_inc(&dev_priv->vbl_received);
33149+ atomic_inc_unchecked(&dev_priv->vbl_received);
33150 drm_handle_vblank(dev, 0);
33151 return IRQ_HANDLED;
33152 }
33153diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33154index af2665c..51922d2 100644
33155--- a/drivers/gpu/drm/r128/r128_state.c
33156+++ b/drivers/gpu/drm/r128/r128_state.c
33157@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33158
33159 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33160 {
33161- if (atomic_read(&dev_priv->idle_count) == 0) {
33162+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33163 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33164 } else {
33165- atomic_set(&dev_priv->idle_count, 0);
33166+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33167 }
33168 }
33169
33170diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33171index dd72b91..8644b3c 100644
33172--- a/drivers/gpu/drm/radeon/atom.c
33173+++ b/drivers/gpu/drm/radeon/atom.c
33174@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33175 char name[512];
33176 int i;
33177
33178+ pax_track_stack();
33179+
33180 ctx->card = card;
33181 ctx->bios = bios;
33182
33183diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33184index 0d79577..efaa7a5 100644
33185--- a/drivers/gpu/drm/radeon/mkregtable.c
33186+++ b/drivers/gpu/drm/radeon/mkregtable.c
33187@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33188 regex_t mask_rex;
33189 regmatch_t match[4];
33190 char buf[1024];
33191- size_t end;
33192+ long end;
33193 int len;
33194 int done = 0;
33195 int r;
33196 unsigned o;
33197 struct offset *offset;
33198 char last_reg_s[10];
33199- int last_reg;
33200+ unsigned long last_reg;
33201
33202 if (regcomp
33203 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33204diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33205index 6735213..38c2c67 100644
33206--- a/drivers/gpu/drm/radeon/radeon.h
33207+++ b/drivers/gpu/drm/radeon/radeon.h
33208@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33209 */
33210 struct radeon_fence_driver {
33211 uint32_t scratch_reg;
33212- atomic_t seq;
33213+ atomic_unchecked_t seq;
33214 uint32_t last_seq;
33215 unsigned long count_timeout;
33216 wait_queue_head_t queue;
33217@@ -640,7 +640,7 @@ struct radeon_asic {
33218 uint32_t offset, uint32_t obj_size);
33219 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33220 void (*bandwidth_update)(struct radeon_device *rdev);
33221-};
33222+} __no_const;
33223
33224 /*
33225 * Asic structures
33226diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33227index 4e928b9..d8b6008 100644
33228--- a/drivers/gpu/drm/radeon/radeon_atombios.c
33229+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33230@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33231 bool linkb;
33232 struct radeon_i2c_bus_rec ddc_bus;
33233
33234+ pax_track_stack();
33235+
33236 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33237
33238 if (data_offset == 0)
33239@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33240 }
33241 }
33242
33243-struct bios_connector {
33244+static struct bios_connector {
33245 bool valid;
33246 uint16_t line_mux;
33247 uint16_t devices;
33248 int connector_type;
33249 struct radeon_i2c_bus_rec ddc_bus;
33250-};
33251+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33252
33253 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33254 drm_device
33255@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33256 uint8_t dac;
33257 union atom_supported_devices *supported_devices;
33258 int i, j;
33259- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33260
33261 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33262
33263diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33264index 083a181..ccccae0 100644
33265--- a/drivers/gpu/drm/radeon/radeon_display.c
33266+++ b/drivers/gpu/drm/radeon/radeon_display.c
33267@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33268
33269 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33270 error = freq - current_freq;
33271- error = error < 0 ? 0xffffffff : error;
33272+ error = (int32_t)error < 0 ? 0xffffffff : error;
33273 } else
33274 error = abs(current_freq - freq);
33275 vco_diff = abs(vco - best_vco);
33276diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33277index 76e4070..193fa7f 100644
33278--- a/drivers/gpu/drm/radeon/radeon_drv.h
33279+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33280@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33281
33282 /* SW interrupt */
33283 wait_queue_head_t swi_queue;
33284- atomic_t swi_emitted;
33285+ atomic_unchecked_t swi_emitted;
33286 int vblank_crtc;
33287 uint32_t irq_enable_reg;
33288 uint32_t r500_disp_irq_reg;
33289diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33290index 3beb26d..6ce9c4a 100644
33291--- a/drivers/gpu/drm/radeon/radeon_fence.c
33292+++ b/drivers/gpu/drm/radeon/radeon_fence.c
33293@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33294 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33295 return 0;
33296 }
33297- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33298+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33299 if (!rdev->cp.ready) {
33300 /* FIXME: cp is not running assume everythings is done right
33301 * away
33302@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33303 return r;
33304 }
33305 WREG32(rdev->fence_drv.scratch_reg, 0);
33306- atomic_set(&rdev->fence_drv.seq, 0);
33307+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33308 INIT_LIST_HEAD(&rdev->fence_drv.created);
33309 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33310 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33311diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33312index a1bf11d..4a123c0 100644
33313--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33314+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33315@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33316 request = compat_alloc_user_space(sizeof(*request));
33317 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33318 || __put_user(req32.param, &request->param)
33319- || __put_user((void __user *)(unsigned long)req32.value,
33320+ || __put_user((unsigned long)req32.value,
33321 &request->value))
33322 return -EFAULT;
33323
33324diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33325index b79ecc4..8dab92d 100644
33326--- a/drivers/gpu/drm/radeon/radeon_irq.c
33327+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33328@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33329 unsigned int ret;
33330 RING_LOCALS;
33331
33332- atomic_inc(&dev_priv->swi_emitted);
33333- ret = atomic_read(&dev_priv->swi_emitted);
33334+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33335+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33336
33337 BEGIN_RING(4);
33338 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33339@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33340 drm_radeon_private_t *dev_priv =
33341 (drm_radeon_private_t *) dev->dev_private;
33342
33343- atomic_set(&dev_priv->swi_emitted, 0);
33344+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33345 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33346
33347 dev->max_vblank_count = 0x001fffff;
33348diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33349index 4747910..48ca4b3 100644
33350--- a/drivers/gpu/drm/radeon/radeon_state.c
33351+++ b/drivers/gpu/drm/radeon/radeon_state.c
33352@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33353 {
33354 drm_radeon_private_t *dev_priv = dev->dev_private;
33355 drm_radeon_getparam_t *param = data;
33356- int value;
33357+ int value = 0;
33358
33359 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33360
33361diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33362index 1381e06..0e53b17 100644
33363--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33364+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33365@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33366 DRM_INFO("radeon: ttm finalized\n");
33367 }
33368
33369-static struct vm_operations_struct radeon_ttm_vm_ops;
33370-static const struct vm_operations_struct *ttm_vm_ops = NULL;
33371-
33372-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33373-{
33374- struct ttm_buffer_object *bo;
33375- int r;
33376-
33377- bo = (struct ttm_buffer_object *)vma->vm_private_data;
33378- if (bo == NULL) {
33379- return VM_FAULT_NOPAGE;
33380- }
33381- r = ttm_vm_ops->fault(vma, vmf);
33382- return r;
33383-}
33384-
33385 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33386 {
33387 struct drm_file *file_priv;
33388 struct radeon_device *rdev;
33389- int r;
33390
33391 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33392 return drm_mmap(filp, vma);
33393@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33394
33395 file_priv = (struct drm_file *)filp->private_data;
33396 rdev = file_priv->minor->dev->dev_private;
33397- if (rdev == NULL) {
33398+ if (!rdev)
33399 return -EINVAL;
33400- }
33401- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33402- if (unlikely(r != 0)) {
33403- return r;
33404- }
33405- if (unlikely(ttm_vm_ops == NULL)) {
33406- ttm_vm_ops = vma->vm_ops;
33407- radeon_ttm_vm_ops = *ttm_vm_ops;
33408- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33409- }
33410- vma->vm_ops = &radeon_ttm_vm_ops;
33411- return 0;
33412+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33413 }
33414
33415
33416diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33417index b12ff76..0bd0c6e 100644
33418--- a/drivers/gpu/drm/radeon/rs690.c
33419+++ b/drivers/gpu/drm/radeon/rs690.c
33420@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33421 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33422 rdev->pm.sideport_bandwidth.full)
33423 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33424- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33425+ read_delay_latency.full = rfixed_const(800 * 1000);
33426 read_delay_latency.full = rfixed_div(read_delay_latency,
33427 rdev->pm.igp_sideport_mclk);
33428+ a.full = rfixed_const(370);
33429+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33430 } else {
33431 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33432 rdev->pm.k8_bandwidth.full)
33433diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33434index 0ed436e..e6e7ce3 100644
33435--- a/drivers/gpu/drm/ttm/ttm_bo.c
33436+++ b/drivers/gpu/drm/ttm/ttm_bo.c
33437@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33438 NULL
33439 };
33440
33441-static struct sysfs_ops ttm_bo_global_ops = {
33442+static const struct sysfs_ops ttm_bo_global_ops = {
33443 .show = &ttm_bo_global_show
33444 };
33445
33446diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33447index 1c040d0..f9e4af8 100644
33448--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33449+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33450@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33451 {
33452 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33453 vma->vm_private_data;
33454- struct ttm_bo_device *bdev = bo->bdev;
33455+ struct ttm_bo_device *bdev;
33456 unsigned long bus_base;
33457 unsigned long bus_offset;
33458 unsigned long bus_size;
33459@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33460 unsigned long address = (unsigned long)vmf->virtual_address;
33461 int retval = VM_FAULT_NOPAGE;
33462
33463+ if (!bo)
33464+ return VM_FAULT_NOPAGE;
33465+ bdev = bo->bdev;
33466+
33467 /*
33468 * Work around locking order reversal in fault / nopfn
33469 * between mmap_sem and bo_reserve: Perform a trylock operation
33470diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33471index b170071..28ae90e 100644
33472--- a/drivers/gpu/drm/ttm/ttm_global.c
33473+++ b/drivers/gpu/drm/ttm/ttm_global.c
33474@@ -36,7 +36,7 @@
33475 struct ttm_global_item {
33476 struct mutex mutex;
33477 void *object;
33478- int refcount;
33479+ atomic_t refcount;
33480 };
33481
33482 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33483@@ -49,7 +49,7 @@ void ttm_global_init(void)
33484 struct ttm_global_item *item = &glob[i];
33485 mutex_init(&item->mutex);
33486 item->object = NULL;
33487- item->refcount = 0;
33488+ atomic_set(&item->refcount, 0);
33489 }
33490 }
33491
33492@@ -59,7 +59,7 @@ void ttm_global_release(void)
33493 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33494 struct ttm_global_item *item = &glob[i];
33495 BUG_ON(item->object != NULL);
33496- BUG_ON(item->refcount != 0);
33497+ BUG_ON(atomic_read(&item->refcount) != 0);
33498 }
33499 }
33500
33501@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33502 void *object;
33503
33504 mutex_lock(&item->mutex);
33505- if (item->refcount == 0) {
33506+ if (atomic_read(&item->refcount) == 0) {
33507 item->object = kzalloc(ref->size, GFP_KERNEL);
33508 if (unlikely(item->object == NULL)) {
33509 ret = -ENOMEM;
33510@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33511 goto out_err;
33512
33513 }
33514- ++item->refcount;
33515+ atomic_inc(&item->refcount);
33516 ref->object = item->object;
33517 object = item->object;
33518 mutex_unlock(&item->mutex);
33519@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33520 struct ttm_global_item *item = &glob[ref->global_type];
33521
33522 mutex_lock(&item->mutex);
33523- BUG_ON(item->refcount == 0);
33524+ BUG_ON(atomic_read(&item->refcount) == 0);
33525 BUG_ON(ref->object != item->object);
33526- if (--item->refcount == 0) {
33527+ if (atomic_dec_and_test(&item->refcount)) {
33528 ref->release(ref);
33529 item->object = NULL;
33530 }
33531diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33532index 072c281..d8ef483 100644
33533--- a/drivers/gpu/drm/ttm/ttm_memory.c
33534+++ b/drivers/gpu/drm/ttm/ttm_memory.c
33535@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33536 NULL
33537 };
33538
33539-static struct sysfs_ops ttm_mem_zone_ops = {
33540+static const struct sysfs_ops ttm_mem_zone_ops = {
33541 .show = &ttm_mem_zone_show,
33542 .store = &ttm_mem_zone_store
33543 };
33544diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33545index cafcb84..b8e66cc 100644
33546--- a/drivers/gpu/drm/via/via_drv.h
33547+++ b/drivers/gpu/drm/via/via_drv.h
33548@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33549 typedef uint32_t maskarray_t[5];
33550
33551 typedef struct drm_via_irq {
33552- atomic_t irq_received;
33553+ atomic_unchecked_t irq_received;
33554 uint32_t pending_mask;
33555 uint32_t enable_mask;
33556 wait_queue_head_t irq_queue;
33557@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33558 struct timeval last_vblank;
33559 int last_vblank_valid;
33560 unsigned usec_per_vblank;
33561- atomic_t vbl_received;
33562+ atomic_unchecked_t vbl_received;
33563 drm_via_state_t hc_state;
33564 char pci_buf[VIA_PCI_BUF_SIZE];
33565 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33566diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33567index 5935b88..127a8a6 100644
33568--- a/drivers/gpu/drm/via/via_irq.c
33569+++ b/drivers/gpu/drm/via/via_irq.c
33570@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33571 if (crtc != 0)
33572 return 0;
33573
33574- return atomic_read(&dev_priv->vbl_received);
33575+ return atomic_read_unchecked(&dev_priv->vbl_received);
33576 }
33577
33578 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33579@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33580
33581 status = VIA_READ(VIA_REG_INTERRUPT);
33582 if (status & VIA_IRQ_VBLANK_PENDING) {
33583- atomic_inc(&dev_priv->vbl_received);
33584- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33585+ atomic_inc_unchecked(&dev_priv->vbl_received);
33586+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33587 do_gettimeofday(&cur_vblank);
33588 if (dev_priv->last_vblank_valid) {
33589 dev_priv->usec_per_vblank =
33590@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33591 dev_priv->last_vblank = cur_vblank;
33592 dev_priv->last_vblank_valid = 1;
33593 }
33594- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33595+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33596 DRM_DEBUG("US per vblank is: %u\n",
33597 dev_priv->usec_per_vblank);
33598 }
33599@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33600
33601 for (i = 0; i < dev_priv->num_irqs; ++i) {
33602 if (status & cur_irq->pending_mask) {
33603- atomic_inc(&cur_irq->irq_received);
33604+ atomic_inc_unchecked(&cur_irq->irq_received);
33605 DRM_WAKEUP(&cur_irq->irq_queue);
33606 handled = 1;
33607 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33608@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33609 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33610 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33611 masks[irq][4]));
33612- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33613+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33614 } else {
33615 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33616 (((cur_irq_sequence =
33617- atomic_read(&cur_irq->irq_received)) -
33618+ atomic_read_unchecked(&cur_irq->irq_received)) -
33619 *sequence) <= (1 << 23)));
33620 }
33621 *sequence = cur_irq_sequence;
33622@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33623 }
33624
33625 for (i = 0; i < dev_priv->num_irqs; ++i) {
33626- atomic_set(&cur_irq->irq_received, 0);
33627+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33628 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33629 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33630 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33631@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33632 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33633 case VIA_IRQ_RELATIVE:
33634 irqwait->request.sequence +=
33635- atomic_read(&cur_irq->irq_received);
33636+ atomic_read_unchecked(&cur_irq->irq_received);
33637 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33638 case VIA_IRQ_ABSOLUTE:
33639 break;
33640diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33641index aa8688d..6a0140c 100644
33642--- a/drivers/gpu/vga/vgaarb.c
33643+++ b/drivers/gpu/vga/vgaarb.c
33644@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33645 uc = &priv->cards[i];
33646 }
33647
33648- if (!uc)
33649- return -EINVAL;
33650+ if (!uc) {
33651+ ret_val = -EINVAL;
33652+ goto done;
33653+ }
33654
33655- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
33656- return -EINVAL;
33657+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
33658+ ret_val = -EINVAL;
33659+ goto done;
33660+ }
33661
33662- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
33663- return -EINVAL;
33664+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
33665+ ret_val = -EINVAL;
33666+ goto done;
33667+ }
33668
33669 vga_put(pdev, io_state);
33670
33671diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33672index f3f1415..e561d90 100644
33673--- a/drivers/hid/hid-core.c
33674+++ b/drivers/hid/hid-core.c
33675@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
33676
33677 int hid_add_device(struct hid_device *hdev)
33678 {
33679- static atomic_t id = ATOMIC_INIT(0);
33680+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33681 int ret;
33682
33683 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33684@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
33685 /* XXX hack, any other cleaner solution after the driver core
33686 * is converted to allow more than 20 bytes as the device name? */
33687 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33688- hdev->vendor, hdev->product, atomic_inc_return(&id));
33689+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33690
33691 ret = device_add(&hdev->dev);
33692 if (!ret)
33693diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33694index 8b6ee24..70f657d 100644
33695--- a/drivers/hid/usbhid/hiddev.c
33696+++ b/drivers/hid/usbhid/hiddev.c
33697@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33698 return put_user(HID_VERSION, (int __user *)arg);
33699
33700 case HIDIOCAPPLICATION:
33701- if (arg < 0 || arg >= hid->maxapplication)
33702+ if (arg >= hid->maxapplication)
33703 return -EINVAL;
33704
33705 for (i = 0; i < hid->maxcollection; i++)
33706diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
33707index 5d5ed69..f40533e 100644
33708--- a/drivers/hwmon/lis3lv02d.c
33709+++ b/drivers/hwmon/lis3lv02d.c
33710@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
33711 * the lid is closed. This leads to interrupts as soon as a little move
33712 * is done.
33713 */
33714- atomic_inc(&lis3_dev.count);
33715+ atomic_inc_unchecked(&lis3_dev.count);
33716
33717 wake_up_interruptible(&lis3_dev.misc_wait);
33718 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
33719@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33720 if (test_and_set_bit(0, &lis3_dev.misc_opened))
33721 return -EBUSY; /* already open */
33722
33723- atomic_set(&lis3_dev.count, 0);
33724+ atomic_set_unchecked(&lis3_dev.count, 0);
33725
33726 /*
33727 * The sensor can generate interrupts for free-fall and direction
33728@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33729 add_wait_queue(&lis3_dev.misc_wait, &wait);
33730 while (true) {
33731 set_current_state(TASK_INTERRUPTIBLE);
33732- data = atomic_xchg(&lis3_dev.count, 0);
33733+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
33734 if (data)
33735 break;
33736
33737@@ -244,7 +244,7 @@ out:
33738 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33739 {
33740 poll_wait(file, &lis3_dev.misc_wait, wait);
33741- if (atomic_read(&lis3_dev.count))
33742+ if (atomic_read_unchecked(&lis3_dev.count))
33743 return POLLIN | POLLRDNORM;
33744 return 0;
33745 }
33746diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
33747index 7cdd76f..fe0efdf 100644
33748--- a/drivers/hwmon/lis3lv02d.h
33749+++ b/drivers/hwmon/lis3lv02d.h
33750@@ -201,7 +201,7 @@ struct lis3lv02d {
33751
33752 struct input_polled_dev *idev; /* input device */
33753 struct platform_device *pdev; /* platform device */
33754- atomic_t count; /* interrupt count after last read */
33755+ atomic_unchecked_t count; /* interrupt count after last read */
33756 int xcalib; /* calibrated null value for x */
33757 int ycalib; /* calibrated null value for y */
33758 int zcalib; /* calibrated null value for z */
33759diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33760index 2040507..706ec1e 100644
33761--- a/drivers/hwmon/sht15.c
33762+++ b/drivers/hwmon/sht15.c
33763@@ -112,7 +112,7 @@ struct sht15_data {
33764 int supply_uV;
33765 int supply_uV_valid;
33766 struct work_struct update_supply_work;
33767- atomic_t interrupt_handled;
33768+ atomic_unchecked_t interrupt_handled;
33769 };
33770
33771 /**
33772@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
33773 return ret;
33774
33775 gpio_direction_input(data->pdata->gpio_data);
33776- atomic_set(&data->interrupt_handled, 0);
33777+ atomic_set_unchecked(&data->interrupt_handled, 0);
33778
33779 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33780 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33781 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33782 /* Only relevant if the interrupt hasn't occured. */
33783- if (!atomic_read(&data->interrupt_handled))
33784+ if (!atomic_read_unchecked(&data->interrupt_handled))
33785 schedule_work(&data->read_work);
33786 }
33787 ret = wait_event_timeout(data->wait_queue,
33788@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
33789 struct sht15_data *data = d;
33790 /* First disable the interrupt */
33791 disable_irq_nosync(irq);
33792- atomic_inc(&data->interrupt_handled);
33793+ atomic_inc_unchecked(&data->interrupt_handled);
33794 /* Then schedule a reading work struct */
33795 if (data->flag != SHT15_READING_NOTHING)
33796 schedule_work(&data->read_work);
33797@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
33798 here as could have gone low in meantime so verify
33799 it hasn't!
33800 */
33801- atomic_set(&data->interrupt_handled, 0);
33802+ atomic_set_unchecked(&data->interrupt_handled, 0);
33803 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33804 /* If still not occured or another handler has been scheduled */
33805 if (gpio_get_value(data->pdata->gpio_data)
33806- || atomic_read(&data->interrupt_handled))
33807+ || atomic_read_unchecked(&data->interrupt_handled))
33808 return;
33809 }
33810 /* Read the data back from the device */
33811diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
33812index 97851c5..cb40626 100644
33813--- a/drivers/hwmon/w83791d.c
33814+++ b/drivers/hwmon/w83791d.c
33815@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
33816 struct i2c_board_info *info);
33817 static int w83791d_remove(struct i2c_client *client);
33818
33819-static int w83791d_read(struct i2c_client *client, u8 register);
33820-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
33821+static int w83791d_read(struct i2c_client *client, u8 reg);
33822+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
33823 static struct w83791d_data *w83791d_update_device(struct device *dev);
33824
33825 #ifdef DEBUG
33826diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
33827index 378fcb5..5e91fa8 100644
33828--- a/drivers/i2c/busses/i2c-amd756-s4882.c
33829+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
33830@@ -43,7 +43,7 @@
33831 extern struct i2c_adapter amd756_smbus;
33832
33833 static struct i2c_adapter *s4882_adapter;
33834-static struct i2c_algorithm *s4882_algo;
33835+static i2c_algorithm_no_const *s4882_algo;
33836
33837 /* Wrapper access functions for multiplexed SMBus */
33838 static DEFINE_MUTEX(amd756_lock);
33839diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
33840index 29015eb..af2d8e9 100644
33841--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
33842+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
33843@@ -41,7 +41,7 @@
33844 extern struct i2c_adapter *nforce2_smbus;
33845
33846 static struct i2c_adapter *s4985_adapter;
33847-static struct i2c_algorithm *s4985_algo;
33848+static i2c_algorithm_no_const *s4985_algo;
33849
33850 /* Wrapper access functions for multiplexed SMBus */
33851 static DEFINE_MUTEX(nforce2_lock);
33852diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
33853index 878f8ec..12376fc 100644
33854--- a/drivers/ide/aec62xx.c
33855+++ b/drivers/ide/aec62xx.c
33856@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
33857 .cable_detect = atp86x_cable_detect,
33858 };
33859
33860-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
33861+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
33862 { /* 0: AEC6210 */
33863 .name = DRV_NAME,
33864 .init_chipset = init_chipset_aec62xx,
33865diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
33866index e59b6de..4b4fc65 100644
33867--- a/drivers/ide/alim15x3.c
33868+++ b/drivers/ide/alim15x3.c
33869@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
33870 .dma_sff_read_status = ide_dma_sff_read_status,
33871 };
33872
33873-static const struct ide_port_info ali15x3_chipset __devinitdata = {
33874+static const struct ide_port_info ali15x3_chipset __devinitconst = {
33875 .name = DRV_NAME,
33876 .init_chipset = init_chipset_ali15x3,
33877 .init_hwif = init_hwif_ali15x3,
33878diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
33879index 628cd2e..087a414 100644
33880--- a/drivers/ide/amd74xx.c
33881+++ b/drivers/ide/amd74xx.c
33882@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
33883 .udma_mask = udma, \
33884 }
33885
33886-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
33887+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
33888 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
33889 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
33890 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
33891diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
33892index 837322b..837fd71 100644
33893--- a/drivers/ide/atiixp.c
33894+++ b/drivers/ide/atiixp.c
33895@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
33896 .cable_detect = atiixp_cable_detect,
33897 };
33898
33899-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
33900+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
33901 { /* 0: IXP200/300/400/700 */
33902 .name = DRV_NAME,
33903 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
33904diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
33905index ca0c46f..d55318a 100644
33906--- a/drivers/ide/cmd64x.c
33907+++ b/drivers/ide/cmd64x.c
33908@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
33909 .dma_sff_read_status = ide_dma_sff_read_status,
33910 };
33911
33912-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
33913+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
33914 { /* 0: CMD643 */
33915 .name = DRV_NAME,
33916 .init_chipset = init_chipset_cmd64x,
33917diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
33918index 09f98ed..cebc5bc 100644
33919--- a/drivers/ide/cs5520.c
33920+++ b/drivers/ide/cs5520.c
33921@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
33922 .set_dma_mode = cs5520_set_dma_mode,
33923 };
33924
33925-static const struct ide_port_info cyrix_chipset __devinitdata = {
33926+static const struct ide_port_info cyrix_chipset __devinitconst = {
33927 .name = DRV_NAME,
33928 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
33929 .port_ops = &cs5520_port_ops,
33930diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
33931index 40bf05e..7d58ca0 100644
33932--- a/drivers/ide/cs5530.c
33933+++ b/drivers/ide/cs5530.c
33934@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
33935 .udma_filter = cs5530_udma_filter,
33936 };
33937
33938-static const struct ide_port_info cs5530_chipset __devinitdata = {
33939+static const struct ide_port_info cs5530_chipset __devinitconst = {
33940 .name = DRV_NAME,
33941 .init_chipset = init_chipset_cs5530,
33942 .init_hwif = init_hwif_cs5530,
33943diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
33944index 983d957..53e6172 100644
33945--- a/drivers/ide/cs5535.c
33946+++ b/drivers/ide/cs5535.c
33947@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
33948 .cable_detect = cs5535_cable_detect,
33949 };
33950
33951-static const struct ide_port_info cs5535_chipset __devinitdata = {
33952+static const struct ide_port_info cs5535_chipset __devinitconst = {
33953 .name = DRV_NAME,
33954 .port_ops = &cs5535_port_ops,
33955 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
33956diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
33957index 74fc540..8e933d8 100644
33958--- a/drivers/ide/cy82c693.c
33959+++ b/drivers/ide/cy82c693.c
33960@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
33961 .set_dma_mode = cy82c693_set_dma_mode,
33962 };
33963
33964-static const struct ide_port_info cy82c693_chipset __devinitdata = {
33965+static const struct ide_port_info cy82c693_chipset __devinitconst = {
33966 .name = DRV_NAME,
33967 .init_iops = init_iops_cy82c693,
33968 .port_ops = &cy82c693_port_ops,
33969diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
33970index 7ce68ef..e78197d 100644
33971--- a/drivers/ide/hpt366.c
33972+++ b/drivers/ide/hpt366.c
33973@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
33974 }
33975 };
33976
33977-static const struct hpt_info hpt36x __devinitdata = {
33978+static const struct hpt_info hpt36x __devinitconst = {
33979 .chip_name = "HPT36x",
33980 .chip_type = HPT36x,
33981 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
33982@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
33983 .timings = &hpt36x_timings
33984 };
33985
33986-static const struct hpt_info hpt370 __devinitdata = {
33987+static const struct hpt_info hpt370 __devinitconst = {
33988 .chip_name = "HPT370",
33989 .chip_type = HPT370,
33990 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
33991@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
33992 .timings = &hpt37x_timings
33993 };
33994
33995-static const struct hpt_info hpt370a __devinitdata = {
33996+static const struct hpt_info hpt370a __devinitconst = {
33997 .chip_name = "HPT370A",
33998 .chip_type = HPT370A,
33999 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34000@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34001 .timings = &hpt37x_timings
34002 };
34003
34004-static const struct hpt_info hpt374 __devinitdata = {
34005+static const struct hpt_info hpt374 __devinitconst = {
34006 .chip_name = "HPT374",
34007 .chip_type = HPT374,
34008 .udma_mask = ATA_UDMA5,
34009@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34010 .timings = &hpt37x_timings
34011 };
34012
34013-static const struct hpt_info hpt372 __devinitdata = {
34014+static const struct hpt_info hpt372 __devinitconst = {
34015 .chip_name = "HPT372",
34016 .chip_type = HPT372,
34017 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34018@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34019 .timings = &hpt37x_timings
34020 };
34021
34022-static const struct hpt_info hpt372a __devinitdata = {
34023+static const struct hpt_info hpt372a __devinitconst = {
34024 .chip_name = "HPT372A",
34025 .chip_type = HPT372A,
34026 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34027@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34028 .timings = &hpt37x_timings
34029 };
34030
34031-static const struct hpt_info hpt302 __devinitdata = {
34032+static const struct hpt_info hpt302 __devinitconst = {
34033 .chip_name = "HPT302",
34034 .chip_type = HPT302,
34035 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34036@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34037 .timings = &hpt37x_timings
34038 };
34039
34040-static const struct hpt_info hpt371 __devinitdata = {
34041+static const struct hpt_info hpt371 __devinitconst = {
34042 .chip_name = "HPT371",
34043 .chip_type = HPT371,
34044 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34045@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34046 .timings = &hpt37x_timings
34047 };
34048
34049-static const struct hpt_info hpt372n __devinitdata = {
34050+static const struct hpt_info hpt372n __devinitconst = {
34051 .chip_name = "HPT372N",
34052 .chip_type = HPT372N,
34053 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34054@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34055 .timings = &hpt37x_timings
34056 };
34057
34058-static const struct hpt_info hpt302n __devinitdata = {
34059+static const struct hpt_info hpt302n __devinitconst = {
34060 .chip_name = "HPT302N",
34061 .chip_type = HPT302N,
34062 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34063@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34064 .timings = &hpt37x_timings
34065 };
34066
34067-static const struct hpt_info hpt371n __devinitdata = {
34068+static const struct hpt_info hpt371n __devinitconst = {
34069 .chip_name = "HPT371N",
34070 .chip_type = HPT371N,
34071 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34072@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34073 .dma_sff_read_status = ide_dma_sff_read_status,
34074 };
34075
34076-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34077+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34078 { /* 0: HPT36x */
34079 .name = DRV_NAME,
34080 .init_chipset = init_chipset_hpt366,
34081diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34082index 2de76cc..74186a1 100644
34083--- a/drivers/ide/ide-cd.c
34084+++ b/drivers/ide/ide-cd.c
34085@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34086 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34087 if ((unsigned long)buf & alignment
34088 || blk_rq_bytes(rq) & q->dma_pad_mask
34089- || object_is_on_stack(buf))
34090+ || object_starts_on_stack(buf))
34091 drive->dma = 0;
34092 }
34093 }
34094diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34095index fefbdfc..62ff465 100644
34096--- a/drivers/ide/ide-floppy.c
34097+++ b/drivers/ide/ide-floppy.c
34098@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34099 u8 pc_buf[256], header_len, desc_cnt;
34100 int i, rc = 1, blocks, length;
34101
34102+ pax_track_stack();
34103+
34104 ide_debug_log(IDE_DBG_FUNC, "enter");
34105
34106 drive->bios_cyl = 0;
34107diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34108index 39d4e01..11538ce 100644
34109--- a/drivers/ide/ide-pci-generic.c
34110+++ b/drivers/ide/ide-pci-generic.c
34111@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34112 .udma_mask = ATA_UDMA6, \
34113 }
34114
34115-static const struct ide_port_info generic_chipsets[] __devinitdata = {
34116+static const struct ide_port_info generic_chipsets[] __devinitconst = {
34117 /* 0: Unknown */
34118 DECLARE_GENERIC_PCI_DEV(0),
34119
34120diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34121index 0d266a5..aaca790 100644
34122--- a/drivers/ide/it8172.c
34123+++ b/drivers/ide/it8172.c
34124@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34125 .set_dma_mode = it8172_set_dma_mode,
34126 };
34127
34128-static const struct ide_port_info it8172_port_info __devinitdata = {
34129+static const struct ide_port_info it8172_port_info __devinitconst = {
34130 .name = DRV_NAME,
34131 .port_ops = &it8172_port_ops,
34132 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34133diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34134index 4797616..4be488a 100644
34135--- a/drivers/ide/it8213.c
34136+++ b/drivers/ide/it8213.c
34137@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34138 .cable_detect = it8213_cable_detect,
34139 };
34140
34141-static const struct ide_port_info it8213_chipset __devinitdata = {
34142+static const struct ide_port_info it8213_chipset __devinitconst = {
34143 .name = DRV_NAME,
34144 .enablebits = { {0x41, 0x80, 0x80} },
34145 .port_ops = &it8213_port_ops,
34146diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34147index 51aa745..146ee60 100644
34148--- a/drivers/ide/it821x.c
34149+++ b/drivers/ide/it821x.c
34150@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34151 .cable_detect = it821x_cable_detect,
34152 };
34153
34154-static const struct ide_port_info it821x_chipset __devinitdata = {
34155+static const struct ide_port_info it821x_chipset __devinitconst = {
34156 .name = DRV_NAME,
34157 .init_chipset = init_chipset_it821x,
34158 .init_hwif = init_hwif_it821x,
34159diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34160index bf2be64..9270098 100644
34161--- a/drivers/ide/jmicron.c
34162+++ b/drivers/ide/jmicron.c
34163@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34164 .cable_detect = jmicron_cable_detect,
34165 };
34166
34167-static const struct ide_port_info jmicron_chipset __devinitdata = {
34168+static const struct ide_port_info jmicron_chipset __devinitconst = {
34169 .name = DRV_NAME,
34170 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34171 .port_ops = &jmicron_port_ops,
34172diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34173index 95327a2..73f78d8 100644
34174--- a/drivers/ide/ns87415.c
34175+++ b/drivers/ide/ns87415.c
34176@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34177 .dma_sff_read_status = superio_dma_sff_read_status,
34178 };
34179
34180-static const struct ide_port_info ns87415_chipset __devinitdata = {
34181+static const struct ide_port_info ns87415_chipset __devinitconst = {
34182 .name = DRV_NAME,
34183 .init_hwif = init_hwif_ns87415,
34184 .tp_ops = &ns87415_tp_ops,
34185diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34186index f1d70d6..e1de05b 100644
34187--- a/drivers/ide/opti621.c
34188+++ b/drivers/ide/opti621.c
34189@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34190 .set_pio_mode = opti621_set_pio_mode,
34191 };
34192
34193-static const struct ide_port_info opti621_chipset __devinitdata = {
34194+static const struct ide_port_info opti621_chipset __devinitconst = {
34195 .name = DRV_NAME,
34196 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34197 .port_ops = &opti621_port_ops,
34198diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34199index 65ba823..7311f4d 100644
34200--- a/drivers/ide/pdc202xx_new.c
34201+++ b/drivers/ide/pdc202xx_new.c
34202@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34203 .udma_mask = udma, \
34204 }
34205
34206-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34207+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34208 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34209 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34210 };
34211diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34212index cb812f3..af816ef 100644
34213--- a/drivers/ide/pdc202xx_old.c
34214+++ b/drivers/ide/pdc202xx_old.c
34215@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34216 .max_sectors = sectors, \
34217 }
34218
34219-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34220+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34221 { /* 0: PDC20246 */
34222 .name = DRV_NAME,
34223 .init_chipset = init_chipset_pdc202xx,
34224diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34225index bf14f39..15c4b98 100644
34226--- a/drivers/ide/piix.c
34227+++ b/drivers/ide/piix.c
34228@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34229 .udma_mask = udma, \
34230 }
34231
34232-static const struct ide_port_info piix_pci_info[] __devinitdata = {
34233+static const struct ide_port_info piix_pci_info[] __devinitconst = {
34234 /* 0: MPIIX */
34235 { /*
34236 * MPIIX actually has only a single IDE channel mapped to
34237diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34238index a6414a8..c04173e 100644
34239--- a/drivers/ide/rz1000.c
34240+++ b/drivers/ide/rz1000.c
34241@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34242 }
34243 }
34244
34245-static const struct ide_port_info rz1000_chipset __devinitdata = {
34246+static const struct ide_port_info rz1000_chipset __devinitconst = {
34247 .name = DRV_NAME,
34248 .host_flags = IDE_HFLAG_NO_DMA,
34249 };
34250diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34251index d467478..9203942 100644
34252--- a/drivers/ide/sc1200.c
34253+++ b/drivers/ide/sc1200.c
34254@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34255 .dma_sff_read_status = ide_dma_sff_read_status,
34256 };
34257
34258-static const struct ide_port_info sc1200_chipset __devinitdata = {
34259+static const struct ide_port_info sc1200_chipset __devinitconst = {
34260 .name = DRV_NAME,
34261 .port_ops = &sc1200_port_ops,
34262 .dma_ops = &sc1200_dma_ops,
34263diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34264index 1104bb3..59c5194 100644
34265--- a/drivers/ide/scc_pata.c
34266+++ b/drivers/ide/scc_pata.c
34267@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34268 .dma_sff_read_status = scc_dma_sff_read_status,
34269 };
34270
34271-static const struct ide_port_info scc_chipset __devinitdata = {
34272+static const struct ide_port_info scc_chipset __devinitconst = {
34273 .name = "sccIDE",
34274 .init_iops = init_iops_scc,
34275 .init_dma = scc_init_dma,
34276diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34277index b6554ef..6cc2cc3 100644
34278--- a/drivers/ide/serverworks.c
34279+++ b/drivers/ide/serverworks.c
34280@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34281 .cable_detect = svwks_cable_detect,
34282 };
34283
34284-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34285+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34286 { /* 0: OSB4 */
34287 .name = DRV_NAME,
34288 .init_chipset = init_chipset_svwks,
34289diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34290index ab3db61..afed580 100644
34291--- a/drivers/ide/setup-pci.c
34292+++ b/drivers/ide/setup-pci.c
34293@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34294 int ret, i, n_ports = dev2 ? 4 : 2;
34295 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34296
34297+ pax_track_stack();
34298+
34299 for (i = 0; i < n_ports / 2; i++) {
34300 ret = ide_setup_pci_controller(pdev[i], d, !i);
34301 if (ret < 0)
34302diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34303index d95df52..0b03a39 100644
34304--- a/drivers/ide/siimage.c
34305+++ b/drivers/ide/siimage.c
34306@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34307 .udma_mask = ATA_UDMA6, \
34308 }
34309
34310-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34311+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34312 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34313 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34314 };
34315diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34316index 3b88eba..ca8699d 100644
34317--- a/drivers/ide/sis5513.c
34318+++ b/drivers/ide/sis5513.c
34319@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34320 .cable_detect = sis_cable_detect,
34321 };
34322
34323-static const struct ide_port_info sis5513_chipset __devinitdata = {
34324+static const struct ide_port_info sis5513_chipset __devinitconst = {
34325 .name = DRV_NAME,
34326 .init_chipset = init_chipset_sis5513,
34327 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34328diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34329index d698da4..fca42a4 100644
34330--- a/drivers/ide/sl82c105.c
34331+++ b/drivers/ide/sl82c105.c
34332@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34333 .dma_sff_read_status = ide_dma_sff_read_status,
34334 };
34335
34336-static const struct ide_port_info sl82c105_chipset __devinitdata = {
34337+static const struct ide_port_info sl82c105_chipset __devinitconst = {
34338 .name = DRV_NAME,
34339 .init_chipset = init_chipset_sl82c105,
34340 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34341diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34342index 1ccfb40..83d5779 100644
34343--- a/drivers/ide/slc90e66.c
34344+++ b/drivers/ide/slc90e66.c
34345@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34346 .cable_detect = slc90e66_cable_detect,
34347 };
34348
34349-static const struct ide_port_info slc90e66_chipset __devinitdata = {
34350+static const struct ide_port_info slc90e66_chipset __devinitconst = {
34351 .name = DRV_NAME,
34352 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34353 .port_ops = &slc90e66_port_ops,
34354diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34355index 05a93d6..5f9e325 100644
34356--- a/drivers/ide/tc86c001.c
34357+++ b/drivers/ide/tc86c001.c
34358@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34359 .dma_sff_read_status = ide_dma_sff_read_status,
34360 };
34361
34362-static const struct ide_port_info tc86c001_chipset __devinitdata = {
34363+static const struct ide_port_info tc86c001_chipset __devinitconst = {
34364 .name = DRV_NAME,
34365 .init_hwif = init_hwif_tc86c001,
34366 .port_ops = &tc86c001_port_ops,
34367diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34368index 8773c3b..7907d6c 100644
34369--- a/drivers/ide/triflex.c
34370+++ b/drivers/ide/triflex.c
34371@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34372 .set_dma_mode = triflex_set_mode,
34373 };
34374
34375-static const struct ide_port_info triflex_device __devinitdata = {
34376+static const struct ide_port_info triflex_device __devinitconst = {
34377 .name = DRV_NAME,
34378 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34379 .port_ops = &triflex_port_ops,
34380diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34381index 4b42ca0..e494a98 100644
34382--- a/drivers/ide/trm290.c
34383+++ b/drivers/ide/trm290.c
34384@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34385 .dma_check = trm290_dma_check,
34386 };
34387
34388-static const struct ide_port_info trm290_chipset __devinitdata = {
34389+static const struct ide_port_info trm290_chipset __devinitconst = {
34390 .name = DRV_NAME,
34391 .init_hwif = init_hwif_trm290,
34392 .tp_ops = &trm290_tp_ops,
34393diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34394index 028de26..520d5d5 100644
34395--- a/drivers/ide/via82cxxx.c
34396+++ b/drivers/ide/via82cxxx.c
34397@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34398 .cable_detect = via82cxxx_cable_detect,
34399 };
34400
34401-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34402+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34403 .name = DRV_NAME,
34404 .init_chipset = init_chipset_via82cxxx,
34405 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34406diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34407index 2cd00b5..14de699 100644
34408--- a/drivers/ieee1394/dv1394.c
34409+++ b/drivers/ieee1394/dv1394.c
34410@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34411 based upon DIF section and sequence
34412 */
34413
34414-static void inline
34415+static inline void
34416 frame_put_packet (struct frame *f, struct packet *p)
34417 {
34418 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34419diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34420index e947d8f..6a966b9 100644
34421--- a/drivers/ieee1394/hosts.c
34422+++ b/drivers/ieee1394/hosts.c
34423@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34424 }
34425
34426 static struct hpsb_host_driver dummy_driver = {
34427+ .name = "dummy",
34428 .transmit_packet = dummy_transmit_packet,
34429 .devctl = dummy_devctl,
34430 .isoctl = dummy_isoctl
34431diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34432index ddaab6e..8d37435 100644
34433--- a/drivers/ieee1394/init_ohci1394_dma.c
34434+++ b/drivers/ieee1394/init_ohci1394_dma.c
34435@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34436 for (func = 0; func < 8; func++) {
34437 u32 class = read_pci_config(num,slot,func,
34438 PCI_CLASS_REVISION);
34439- if ((class == 0xffffffff))
34440+ if (class == 0xffffffff)
34441 continue; /* No device at this func */
34442
34443 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34444diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34445index 65c1429..5d8c11f 100644
34446--- a/drivers/ieee1394/ohci1394.c
34447+++ b/drivers/ieee1394/ohci1394.c
34448@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34449 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34450
34451 /* Module Parameters */
34452-static int phys_dma = 1;
34453+static int phys_dma;
34454 module_param(phys_dma, int, 0444);
34455-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34456+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34457
34458 static void dma_trm_tasklet(unsigned long data);
34459 static void dma_trm_reset(struct dma_trm_ctx *d);
34460diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34461index f199896..78c9fc8 100644
34462--- a/drivers/ieee1394/sbp2.c
34463+++ b/drivers/ieee1394/sbp2.c
34464@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34465 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34466 MODULE_LICENSE("GPL");
34467
34468-static int sbp2_module_init(void)
34469+static int __init sbp2_module_init(void)
34470 {
34471 int ret;
34472
34473diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34474index a5dea6b..0cefe8f 100644
34475--- a/drivers/infiniband/core/cm.c
34476+++ b/drivers/infiniband/core/cm.c
34477@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34478
34479 struct cm_counter_group {
34480 struct kobject obj;
34481- atomic_long_t counter[CM_ATTR_COUNT];
34482+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34483 };
34484
34485 struct cm_counter_attribute {
34486@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34487 struct ib_mad_send_buf *msg = NULL;
34488 int ret;
34489
34490- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34491+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34492 counter[CM_REQ_COUNTER]);
34493
34494 /* Quick state check to discard duplicate REQs. */
34495@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34496 if (!cm_id_priv)
34497 return;
34498
34499- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34500+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34501 counter[CM_REP_COUNTER]);
34502 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34503 if (ret)
34504@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34505 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34506 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34507 spin_unlock_irq(&cm_id_priv->lock);
34508- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34509+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34510 counter[CM_RTU_COUNTER]);
34511 goto out;
34512 }
34513@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34514 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34515 dreq_msg->local_comm_id);
34516 if (!cm_id_priv) {
34517- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34518+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34519 counter[CM_DREQ_COUNTER]);
34520 cm_issue_drep(work->port, work->mad_recv_wc);
34521 return -EINVAL;
34522@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34523 case IB_CM_MRA_REP_RCVD:
34524 break;
34525 case IB_CM_TIMEWAIT:
34526- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34527+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34528 counter[CM_DREQ_COUNTER]);
34529 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34530 goto unlock;
34531@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34532 cm_free_msg(msg);
34533 goto deref;
34534 case IB_CM_DREQ_RCVD:
34535- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34536+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34537 counter[CM_DREQ_COUNTER]);
34538 goto unlock;
34539 default:
34540@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34541 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34542 cm_id_priv->msg, timeout)) {
34543 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34544- atomic_long_inc(&work->port->
34545+ atomic_long_inc_unchecked(&work->port->
34546 counter_group[CM_RECV_DUPLICATES].
34547 counter[CM_MRA_COUNTER]);
34548 goto out;
34549@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34550 break;
34551 case IB_CM_MRA_REQ_RCVD:
34552 case IB_CM_MRA_REP_RCVD:
34553- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34554+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34555 counter[CM_MRA_COUNTER]);
34556 /* fall through */
34557 default:
34558@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34559 case IB_CM_LAP_IDLE:
34560 break;
34561 case IB_CM_MRA_LAP_SENT:
34562- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34563+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34564 counter[CM_LAP_COUNTER]);
34565 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34566 goto unlock;
34567@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34568 cm_free_msg(msg);
34569 goto deref;
34570 case IB_CM_LAP_RCVD:
34571- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34572+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34573 counter[CM_LAP_COUNTER]);
34574 goto unlock;
34575 default:
34576@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34577 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34578 if (cur_cm_id_priv) {
34579 spin_unlock_irq(&cm.lock);
34580- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34581+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34582 counter[CM_SIDR_REQ_COUNTER]);
34583 goto out; /* Duplicate message. */
34584 }
34585@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34586 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34587 msg->retries = 1;
34588
34589- atomic_long_add(1 + msg->retries,
34590+ atomic_long_add_unchecked(1 + msg->retries,
34591 &port->counter_group[CM_XMIT].counter[attr_index]);
34592 if (msg->retries)
34593- atomic_long_add(msg->retries,
34594+ atomic_long_add_unchecked(msg->retries,
34595 &port->counter_group[CM_XMIT_RETRIES].
34596 counter[attr_index]);
34597
34598@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34599 }
34600
34601 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34602- atomic_long_inc(&port->counter_group[CM_RECV].
34603+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34604 counter[attr_id - CM_ATTR_ID_OFFSET]);
34605
34606 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34607@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34608 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34609
34610 return sprintf(buf, "%ld\n",
34611- atomic_long_read(&group->counter[cm_attr->index]));
34612+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34613 }
34614
34615-static struct sysfs_ops cm_counter_ops = {
34616+static const struct sysfs_ops cm_counter_ops = {
34617 .show = cm_show_counter
34618 };
34619
34620diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34621index 4507043..14ad522 100644
34622--- a/drivers/infiniband/core/fmr_pool.c
34623+++ b/drivers/infiniband/core/fmr_pool.c
34624@@ -97,8 +97,8 @@ struct ib_fmr_pool {
34625
34626 struct task_struct *thread;
34627
34628- atomic_t req_ser;
34629- atomic_t flush_ser;
34630+ atomic_unchecked_t req_ser;
34631+ atomic_unchecked_t flush_ser;
34632
34633 wait_queue_head_t force_wait;
34634 };
34635@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34636 struct ib_fmr_pool *pool = pool_ptr;
34637
34638 do {
34639- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34640+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34641 ib_fmr_batch_release(pool);
34642
34643- atomic_inc(&pool->flush_ser);
34644+ atomic_inc_unchecked(&pool->flush_ser);
34645 wake_up_interruptible(&pool->force_wait);
34646
34647 if (pool->flush_function)
34648@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34649 }
34650
34651 set_current_state(TASK_INTERRUPTIBLE);
34652- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
34653+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
34654 !kthread_should_stop())
34655 schedule();
34656 __set_current_state(TASK_RUNNING);
34657@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
34658 pool->dirty_watermark = params->dirty_watermark;
34659 pool->dirty_len = 0;
34660 spin_lock_init(&pool->pool_lock);
34661- atomic_set(&pool->req_ser, 0);
34662- atomic_set(&pool->flush_ser, 0);
34663+ atomic_set_unchecked(&pool->req_ser, 0);
34664+ atomic_set_unchecked(&pool->flush_ser, 0);
34665 init_waitqueue_head(&pool->force_wait);
34666
34667 pool->thread = kthread_run(ib_fmr_cleanup_thread,
34668@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
34669 }
34670 spin_unlock_irq(&pool->pool_lock);
34671
34672- serial = atomic_inc_return(&pool->req_ser);
34673+ serial = atomic_inc_return_unchecked(&pool->req_ser);
34674 wake_up_process(pool->thread);
34675
34676 if (wait_event_interruptible(pool->force_wait,
34677- atomic_read(&pool->flush_ser) - serial >= 0))
34678+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
34679 return -EINTR;
34680
34681 return 0;
34682@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
34683 } else {
34684 list_add_tail(&fmr->list, &pool->dirty_list);
34685 if (++pool->dirty_len >= pool->dirty_watermark) {
34686- atomic_inc(&pool->req_ser);
34687+ atomic_inc_unchecked(&pool->req_ser);
34688 wake_up_process(pool->thread);
34689 }
34690 }
34691diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
34692index 158a214..1558bb7 100644
34693--- a/drivers/infiniband/core/sysfs.c
34694+++ b/drivers/infiniband/core/sysfs.c
34695@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
34696 return port_attr->show(p, port_attr, buf);
34697 }
34698
34699-static struct sysfs_ops port_sysfs_ops = {
34700+static const struct sysfs_ops port_sysfs_ops = {
34701 .show = port_attr_show
34702 };
34703
34704diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
34705index 5440da0..1194ecb 100644
34706--- a/drivers/infiniband/core/uverbs_marshall.c
34707+++ b/drivers/infiniband/core/uverbs_marshall.c
34708@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
34709 dst->grh.sgid_index = src->grh.sgid_index;
34710 dst->grh.hop_limit = src->grh.hop_limit;
34711 dst->grh.traffic_class = src->grh.traffic_class;
34712+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
34713 dst->dlid = src->dlid;
34714 dst->sl = src->sl;
34715 dst->src_path_bits = src->src_path_bits;
34716 dst->static_rate = src->static_rate;
34717 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
34718 dst->port_num = src->port_num;
34719+ dst->reserved = 0;
34720 }
34721 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
34722
34723 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34724 struct ib_qp_attr *src)
34725 {
34726+ dst->qp_state = src->qp_state;
34727 dst->cur_qp_state = src->cur_qp_state;
34728 dst->path_mtu = src->path_mtu;
34729 dst->path_mig_state = src->path_mig_state;
34730@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34731 dst->rnr_retry = src->rnr_retry;
34732 dst->alt_port_num = src->alt_port_num;
34733 dst->alt_timeout = src->alt_timeout;
34734+ memset(dst->reserved, 0, sizeof(dst->reserved));
34735 }
34736 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
34737
34738diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
34739index 100da85..62e6b88 100644
34740--- a/drivers/infiniband/hw/ipath/ipath_fs.c
34741+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
34742@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
34743 struct infinipath_counters counters;
34744 struct ipath_devdata *dd;
34745
34746+ pax_track_stack();
34747+
34748 dd = file->f_path.dentry->d_inode->i_private;
34749 dd->ipath_f_read_counters(dd, &counters);
34750
34751diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34752index cbde0cf..afaf55c 100644
34753--- a/drivers/infiniband/hw/nes/nes.c
34754+++ b/drivers/infiniband/hw/nes/nes.c
34755@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34756 LIST_HEAD(nes_adapter_list);
34757 static LIST_HEAD(nes_dev_list);
34758
34759-atomic_t qps_destroyed;
34760+atomic_unchecked_t qps_destroyed;
34761
34762 static unsigned int ee_flsh_adapter;
34763 static unsigned int sysfs_nonidx_addr;
34764@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
34765 struct nes_adapter *nesadapter = nesdev->nesadapter;
34766 u32 qp_id;
34767
34768- atomic_inc(&qps_destroyed);
34769+ atomic_inc_unchecked(&qps_destroyed);
34770
34771 /* Free the control structures */
34772
34773diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
34774index bcc6abc..9c76b2f 100644
34775--- a/drivers/infiniband/hw/nes/nes.h
34776+++ b/drivers/infiniband/hw/nes/nes.h
34777@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
34778 extern unsigned int wqm_quanta;
34779 extern struct list_head nes_adapter_list;
34780
34781-extern atomic_t cm_connects;
34782-extern atomic_t cm_accepts;
34783-extern atomic_t cm_disconnects;
34784-extern atomic_t cm_closes;
34785-extern atomic_t cm_connecteds;
34786-extern atomic_t cm_connect_reqs;
34787-extern atomic_t cm_rejects;
34788-extern atomic_t mod_qp_timouts;
34789-extern atomic_t qps_created;
34790-extern atomic_t qps_destroyed;
34791-extern atomic_t sw_qps_destroyed;
34792+extern atomic_unchecked_t cm_connects;
34793+extern atomic_unchecked_t cm_accepts;
34794+extern atomic_unchecked_t cm_disconnects;
34795+extern atomic_unchecked_t cm_closes;
34796+extern atomic_unchecked_t cm_connecteds;
34797+extern atomic_unchecked_t cm_connect_reqs;
34798+extern atomic_unchecked_t cm_rejects;
34799+extern atomic_unchecked_t mod_qp_timouts;
34800+extern atomic_unchecked_t qps_created;
34801+extern atomic_unchecked_t qps_destroyed;
34802+extern atomic_unchecked_t sw_qps_destroyed;
34803 extern u32 mh_detected;
34804 extern u32 mh_pauses_sent;
34805 extern u32 cm_packets_sent;
34806@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
34807 extern u32 cm_listens_created;
34808 extern u32 cm_listens_destroyed;
34809 extern u32 cm_backlog_drops;
34810-extern atomic_t cm_loopbacks;
34811-extern atomic_t cm_nodes_created;
34812-extern atomic_t cm_nodes_destroyed;
34813-extern atomic_t cm_accel_dropped_pkts;
34814-extern atomic_t cm_resets_recvd;
34815+extern atomic_unchecked_t cm_loopbacks;
34816+extern atomic_unchecked_t cm_nodes_created;
34817+extern atomic_unchecked_t cm_nodes_destroyed;
34818+extern atomic_unchecked_t cm_accel_dropped_pkts;
34819+extern atomic_unchecked_t cm_resets_recvd;
34820
34821 extern u32 int_mod_timer_init;
34822 extern u32 int_mod_cq_depth_256;
34823diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
34824index 73473db..5ed06e8 100644
34825--- a/drivers/infiniband/hw/nes/nes_cm.c
34826+++ b/drivers/infiniband/hw/nes/nes_cm.c
34827@@ -69,11 +69,11 @@ u32 cm_packets_received;
34828 u32 cm_listens_created;
34829 u32 cm_listens_destroyed;
34830 u32 cm_backlog_drops;
34831-atomic_t cm_loopbacks;
34832-atomic_t cm_nodes_created;
34833-atomic_t cm_nodes_destroyed;
34834-atomic_t cm_accel_dropped_pkts;
34835-atomic_t cm_resets_recvd;
34836+atomic_unchecked_t cm_loopbacks;
34837+atomic_unchecked_t cm_nodes_created;
34838+atomic_unchecked_t cm_nodes_destroyed;
34839+atomic_unchecked_t cm_accel_dropped_pkts;
34840+atomic_unchecked_t cm_resets_recvd;
34841
34842 static inline int mini_cm_accelerated(struct nes_cm_core *,
34843 struct nes_cm_node *);
34844@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
34845
34846 static struct nes_cm_core *g_cm_core;
34847
34848-atomic_t cm_connects;
34849-atomic_t cm_accepts;
34850-atomic_t cm_disconnects;
34851-atomic_t cm_closes;
34852-atomic_t cm_connecteds;
34853-atomic_t cm_connect_reqs;
34854-atomic_t cm_rejects;
34855+atomic_unchecked_t cm_connects;
34856+atomic_unchecked_t cm_accepts;
34857+atomic_unchecked_t cm_disconnects;
34858+atomic_unchecked_t cm_closes;
34859+atomic_unchecked_t cm_connecteds;
34860+atomic_unchecked_t cm_connect_reqs;
34861+atomic_unchecked_t cm_rejects;
34862
34863
34864 /**
34865@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
34866 cm_node->rem_mac);
34867
34868 add_hte_node(cm_core, cm_node);
34869- atomic_inc(&cm_nodes_created);
34870+ atomic_inc_unchecked(&cm_nodes_created);
34871
34872 return cm_node;
34873 }
34874@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
34875 }
34876
34877 atomic_dec(&cm_core->node_cnt);
34878- atomic_inc(&cm_nodes_destroyed);
34879+ atomic_inc_unchecked(&cm_nodes_destroyed);
34880 nesqp = cm_node->nesqp;
34881 if (nesqp) {
34882 nesqp->cm_node = NULL;
34883@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
34884
34885 static void drop_packet(struct sk_buff *skb)
34886 {
34887- atomic_inc(&cm_accel_dropped_pkts);
34888+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34889 dev_kfree_skb_any(skb);
34890 }
34891
34892@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
34893
34894 int reset = 0; /* whether to send reset in case of err.. */
34895 int passive_state;
34896- atomic_inc(&cm_resets_recvd);
34897+ atomic_inc_unchecked(&cm_resets_recvd);
34898 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
34899 " refcnt=%d\n", cm_node, cm_node->state,
34900 atomic_read(&cm_node->ref_count));
34901@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
34902 rem_ref_cm_node(cm_node->cm_core, cm_node);
34903 return NULL;
34904 }
34905- atomic_inc(&cm_loopbacks);
34906+ atomic_inc_unchecked(&cm_loopbacks);
34907 loopbackremotenode->loopbackpartner = cm_node;
34908 loopbackremotenode->tcp_cntxt.rcv_wscale =
34909 NES_CM_DEFAULT_RCV_WND_SCALE;
34910@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
34911 add_ref_cm_node(cm_node);
34912 } else if (cm_node->state == NES_CM_STATE_TSA) {
34913 rem_ref_cm_node(cm_core, cm_node);
34914- atomic_inc(&cm_accel_dropped_pkts);
34915+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34916 dev_kfree_skb_any(skb);
34917 break;
34918 }
34919@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34920
34921 if ((cm_id) && (cm_id->event_handler)) {
34922 if (issue_disconn) {
34923- atomic_inc(&cm_disconnects);
34924+ atomic_inc_unchecked(&cm_disconnects);
34925 cm_event.event = IW_CM_EVENT_DISCONNECT;
34926 cm_event.status = disconn_status;
34927 cm_event.local_addr = cm_id->local_addr;
34928@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34929 }
34930
34931 if (issue_close) {
34932- atomic_inc(&cm_closes);
34933+ atomic_inc_unchecked(&cm_closes);
34934 nes_disconnect(nesqp, 1);
34935
34936 cm_id->provider_data = nesqp;
34937@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34938
34939 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
34940 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
34941- atomic_inc(&cm_accepts);
34942+ atomic_inc_unchecked(&cm_accepts);
34943
34944 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
34945 atomic_read(&nesvnic->netdev->refcnt));
34946@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
34947
34948 struct nes_cm_core *cm_core;
34949
34950- atomic_inc(&cm_rejects);
34951+ atomic_inc_unchecked(&cm_rejects);
34952 cm_node = (struct nes_cm_node *) cm_id->provider_data;
34953 loopback = cm_node->loopbackpartner;
34954 cm_core = cm_node->cm_core;
34955@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34956 ntohl(cm_id->local_addr.sin_addr.s_addr),
34957 ntohs(cm_id->local_addr.sin_port));
34958
34959- atomic_inc(&cm_connects);
34960+ atomic_inc_unchecked(&cm_connects);
34961 nesqp->active_conn = 1;
34962
34963 /* cache the cm_id in the qp */
34964@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
34965 if (nesqp->destroyed) {
34966 return;
34967 }
34968- atomic_inc(&cm_connecteds);
34969+ atomic_inc_unchecked(&cm_connecteds);
34970 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
34971 " local port 0x%04X. jiffies = %lu.\n",
34972 nesqp->hwqp.qp_id,
34973@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
34974
34975 ret = cm_id->event_handler(cm_id, &cm_event);
34976 cm_id->add_ref(cm_id);
34977- atomic_inc(&cm_closes);
34978+ atomic_inc_unchecked(&cm_closes);
34979 cm_event.event = IW_CM_EVENT_CLOSE;
34980 cm_event.status = IW_CM_EVENT_STATUS_OK;
34981 cm_event.provider_data = cm_id->provider_data;
34982@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
34983 return;
34984 cm_id = cm_node->cm_id;
34985
34986- atomic_inc(&cm_connect_reqs);
34987+ atomic_inc_unchecked(&cm_connect_reqs);
34988 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34989 cm_node, cm_id, jiffies);
34990
34991@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
34992 return;
34993 cm_id = cm_node->cm_id;
34994
34995- atomic_inc(&cm_connect_reqs);
34996+ atomic_inc_unchecked(&cm_connect_reqs);
34997 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34998 cm_node, cm_id, jiffies);
34999
35000diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35001index e593af3..870694a 100644
35002--- a/drivers/infiniband/hw/nes/nes_nic.c
35003+++ b/drivers/infiniband/hw/nes/nes_nic.c
35004@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35005 target_stat_values[++index] = mh_detected;
35006 target_stat_values[++index] = mh_pauses_sent;
35007 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35008- target_stat_values[++index] = atomic_read(&cm_connects);
35009- target_stat_values[++index] = atomic_read(&cm_accepts);
35010- target_stat_values[++index] = atomic_read(&cm_disconnects);
35011- target_stat_values[++index] = atomic_read(&cm_connecteds);
35012- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35013- target_stat_values[++index] = atomic_read(&cm_rejects);
35014- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35015- target_stat_values[++index] = atomic_read(&qps_created);
35016- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35017- target_stat_values[++index] = atomic_read(&qps_destroyed);
35018- target_stat_values[++index] = atomic_read(&cm_closes);
35019+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35020+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35021+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35022+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35023+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35024+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35025+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35026+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35027+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35028+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35029+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35030 target_stat_values[++index] = cm_packets_sent;
35031 target_stat_values[++index] = cm_packets_bounced;
35032 target_stat_values[++index] = cm_packets_created;
35033@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35034 target_stat_values[++index] = cm_listens_created;
35035 target_stat_values[++index] = cm_listens_destroyed;
35036 target_stat_values[++index] = cm_backlog_drops;
35037- target_stat_values[++index] = atomic_read(&cm_loopbacks);
35038- target_stat_values[++index] = atomic_read(&cm_nodes_created);
35039- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35040- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35041- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35042+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35043+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35044+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35045+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35046+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35047 target_stat_values[++index] = int_mod_timer_init;
35048 target_stat_values[++index] = int_mod_cq_depth_1;
35049 target_stat_values[++index] = int_mod_cq_depth_4;
35050diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35051index a680c42..f914deb 100644
35052--- a/drivers/infiniband/hw/nes/nes_verbs.c
35053+++ b/drivers/infiniband/hw/nes/nes_verbs.c
35054@@ -45,9 +45,9 @@
35055
35056 #include <rdma/ib_umem.h>
35057
35058-atomic_t mod_qp_timouts;
35059-atomic_t qps_created;
35060-atomic_t sw_qps_destroyed;
35061+atomic_unchecked_t mod_qp_timouts;
35062+atomic_unchecked_t qps_created;
35063+atomic_unchecked_t sw_qps_destroyed;
35064
35065 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35066
35067@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35068 if (init_attr->create_flags)
35069 return ERR_PTR(-EINVAL);
35070
35071- atomic_inc(&qps_created);
35072+ atomic_inc_unchecked(&qps_created);
35073 switch (init_attr->qp_type) {
35074 case IB_QPT_RC:
35075 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35076@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35077 struct iw_cm_event cm_event;
35078 int ret;
35079
35080- atomic_inc(&sw_qps_destroyed);
35081+ atomic_inc_unchecked(&sw_qps_destroyed);
35082 nesqp->destroyed = 1;
35083
35084 /* Blow away the connection if it exists. */
35085diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35086index ac11be0..3883c04 100644
35087--- a/drivers/input/gameport/gameport.c
35088+++ b/drivers/input/gameport/gameport.c
35089@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35090 */
35091 static void gameport_init_port(struct gameport *gameport)
35092 {
35093- static atomic_t gameport_no = ATOMIC_INIT(0);
35094+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35095
35096 __module_get(THIS_MODULE);
35097
35098 mutex_init(&gameport->drv_mutex);
35099 device_initialize(&gameport->dev);
35100- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35101+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35102 gameport->dev.bus = &gameport_bus;
35103 gameport->dev.release = gameport_release_port;
35104 if (gameport->parent)
35105diff --git a/drivers/input/input.c b/drivers/input/input.c
35106index c82ae82..8cfb9cb 100644
35107--- a/drivers/input/input.c
35108+++ b/drivers/input/input.c
35109@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35110 */
35111 int input_register_device(struct input_dev *dev)
35112 {
35113- static atomic_t input_no = ATOMIC_INIT(0);
35114+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35115 struct input_handler *handler;
35116 const char *path;
35117 int error;
35118@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35119 dev->setkeycode = input_default_setkeycode;
35120
35121 dev_set_name(&dev->dev, "input%ld",
35122- (unsigned long) atomic_inc_return(&input_no) - 1);
35123+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35124
35125 error = device_add(&dev->dev);
35126 if (error)
35127diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35128index ca13a6b..b032b0c 100644
35129--- a/drivers/input/joystick/sidewinder.c
35130+++ b/drivers/input/joystick/sidewinder.c
35131@@ -30,6 +30,7 @@
35132 #include <linux/kernel.h>
35133 #include <linux/module.h>
35134 #include <linux/slab.h>
35135+#include <linux/sched.h>
35136 #include <linux/init.h>
35137 #include <linux/input.h>
35138 #include <linux/gameport.h>
35139@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35140 unsigned char buf[SW_LENGTH];
35141 int i;
35142
35143+ pax_track_stack();
35144+
35145 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35146
35147 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35148diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35149index 79e3edc..01412b9 100644
35150--- a/drivers/input/joystick/xpad.c
35151+++ b/drivers/input/joystick/xpad.c
35152@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35153
35154 static int xpad_led_probe(struct usb_xpad *xpad)
35155 {
35156- static atomic_t led_seq = ATOMIC_INIT(0);
35157+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35158 long led_no;
35159 struct xpad_led *led;
35160 struct led_classdev *led_cdev;
35161@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35162 if (!led)
35163 return -ENOMEM;
35164
35165- led_no = (long)atomic_inc_return(&led_seq) - 1;
35166+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35167
35168 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35169 led->xpad = xpad;
35170diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35171index 0236f0d..c7327f1 100644
35172--- a/drivers/input/serio/serio.c
35173+++ b/drivers/input/serio/serio.c
35174@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35175 */
35176 static void serio_init_port(struct serio *serio)
35177 {
35178- static atomic_t serio_no = ATOMIC_INIT(0);
35179+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35180
35181 __module_get(THIS_MODULE);
35182
35183@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35184 mutex_init(&serio->drv_mutex);
35185 device_initialize(&serio->dev);
35186 dev_set_name(&serio->dev, "serio%ld",
35187- (long)atomic_inc_return(&serio_no) - 1);
35188+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
35189 serio->dev.bus = &serio_bus;
35190 serio->dev.release = serio_release_port;
35191 if (serio->parent) {
35192diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35193index 33dcd8d..2783d25 100644
35194--- a/drivers/isdn/gigaset/common.c
35195+++ b/drivers/isdn/gigaset/common.c
35196@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35197 cs->commands_pending = 0;
35198 cs->cur_at_seq = 0;
35199 cs->gotfwver = -1;
35200- cs->open_count = 0;
35201+ local_set(&cs->open_count, 0);
35202 cs->dev = NULL;
35203 cs->tty = NULL;
35204 cs->tty_dev = NULL;
35205diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35206index a2f6125..6a70677 100644
35207--- a/drivers/isdn/gigaset/gigaset.h
35208+++ b/drivers/isdn/gigaset/gigaset.h
35209@@ -34,6 +34,7 @@
35210 #include <linux/tty_driver.h>
35211 #include <linux/list.h>
35212 #include <asm/atomic.h>
35213+#include <asm/local.h>
35214
35215 #define GIG_VERSION {0,5,0,0}
35216 #define GIG_COMPAT {0,4,0,0}
35217@@ -446,7 +447,7 @@ struct cardstate {
35218 spinlock_t cmdlock;
35219 unsigned curlen, cmdbytes;
35220
35221- unsigned open_count;
35222+ local_t open_count;
35223 struct tty_struct *tty;
35224 struct tasklet_struct if_wake_tasklet;
35225 unsigned control_state;
35226diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35227index b3065b8..c7e8cc9 100644
35228--- a/drivers/isdn/gigaset/interface.c
35229+++ b/drivers/isdn/gigaset/interface.c
35230@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35231 return -ERESTARTSYS; // FIXME -EINTR?
35232 tty->driver_data = cs;
35233
35234- ++cs->open_count;
35235-
35236- if (cs->open_count == 1) {
35237+ if (local_inc_return(&cs->open_count) == 1) {
35238 spin_lock_irqsave(&cs->lock, flags);
35239 cs->tty = tty;
35240 spin_unlock_irqrestore(&cs->lock, flags);
35241@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35242
35243 if (!cs->connected)
35244 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35245- else if (!cs->open_count)
35246+ else if (!local_read(&cs->open_count))
35247 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35248 else {
35249- if (!--cs->open_count) {
35250+ if (!local_dec_return(&cs->open_count)) {
35251 spin_lock_irqsave(&cs->lock, flags);
35252 cs->tty = NULL;
35253 spin_unlock_irqrestore(&cs->lock, flags);
35254@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35255 if (!cs->connected) {
35256 gig_dbg(DEBUG_IF, "not connected");
35257 retval = -ENODEV;
35258- } else if (!cs->open_count)
35259+ } else if (!local_read(&cs->open_count))
35260 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35261 else {
35262 retval = 0;
35263@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35264 if (!cs->connected) {
35265 gig_dbg(DEBUG_IF, "not connected");
35266 retval = -ENODEV;
35267- } else if (!cs->open_count)
35268+ } else if (!local_read(&cs->open_count))
35269 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35270 else if (cs->mstate != MS_LOCKED) {
35271 dev_warn(cs->dev, "can't write to unlocked device\n");
35272@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35273 if (!cs->connected) {
35274 gig_dbg(DEBUG_IF, "not connected");
35275 retval = -ENODEV;
35276- } else if (!cs->open_count)
35277+ } else if (!local_read(&cs->open_count))
35278 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35279 else if (cs->mstate != MS_LOCKED) {
35280 dev_warn(cs->dev, "can't write to unlocked device\n");
35281@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35282
35283 if (!cs->connected)
35284 gig_dbg(DEBUG_IF, "not connected");
35285- else if (!cs->open_count)
35286+ else if (!local_read(&cs->open_count))
35287 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35288 else if (cs->mstate != MS_LOCKED)
35289 dev_warn(cs->dev, "can't write to unlocked device\n");
35290@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35291
35292 if (!cs->connected)
35293 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35294- else if (!cs->open_count)
35295+ else if (!local_read(&cs->open_count))
35296 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35297 else {
35298 //FIXME
35299@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35300
35301 if (!cs->connected)
35302 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35303- else if (!cs->open_count)
35304+ else if (!local_read(&cs->open_count))
35305 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35306 else {
35307 //FIXME
35308@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35309 goto out;
35310 }
35311
35312- if (!cs->open_count) {
35313+ if (!local_read(&cs->open_count)) {
35314 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35315 goto out;
35316 }
35317diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35318index a7c0083..62a7cb6 100644
35319--- a/drivers/isdn/hardware/avm/b1.c
35320+++ b/drivers/isdn/hardware/avm/b1.c
35321@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35322 }
35323 if (left) {
35324 if (t4file->user) {
35325- if (copy_from_user(buf, dp, left))
35326+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35327 return -EFAULT;
35328 } else {
35329 memcpy(buf, dp, left);
35330@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35331 }
35332 if (left) {
35333 if (config->user) {
35334- if (copy_from_user(buf, dp, left))
35335+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35336 return -EFAULT;
35337 } else {
35338 memcpy(buf, dp, left);
35339diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35340index f130724..c373c68 100644
35341--- a/drivers/isdn/hardware/eicon/capidtmf.c
35342+++ b/drivers/isdn/hardware/eicon/capidtmf.c
35343@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35344 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35345 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35346
35347+ pax_track_stack();
35348
35349 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35350 {
35351diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35352index 4d425c6..a9be6c4 100644
35353--- a/drivers/isdn/hardware/eicon/capifunc.c
35354+++ b/drivers/isdn/hardware/eicon/capifunc.c
35355@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35356 IDI_SYNC_REQ req;
35357 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35358
35359+ pax_track_stack();
35360+
35361 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35362
35363 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35364diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35365index 3029234..ef0d9e2 100644
35366--- a/drivers/isdn/hardware/eicon/diddfunc.c
35367+++ b/drivers/isdn/hardware/eicon/diddfunc.c
35368@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35369 IDI_SYNC_REQ req;
35370 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35371
35372+ pax_track_stack();
35373+
35374 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35375
35376 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35377diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35378index d36a4c0..11e7d1a 100644
35379--- a/drivers/isdn/hardware/eicon/divasfunc.c
35380+++ b/drivers/isdn/hardware/eicon/divasfunc.c
35381@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35382 IDI_SYNC_REQ req;
35383 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35384
35385+ pax_track_stack();
35386+
35387 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35388
35389 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35390diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35391index 85784a7..a19ca98 100644
35392--- a/drivers/isdn/hardware/eicon/divasync.h
35393+++ b/drivers/isdn/hardware/eicon/divasync.h
35394@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35395 } diva_didd_add_adapter_t;
35396 typedef struct _diva_didd_remove_adapter {
35397 IDI_CALL p_request;
35398-} diva_didd_remove_adapter_t;
35399+} __no_const diva_didd_remove_adapter_t;
35400 typedef struct _diva_didd_read_adapter_array {
35401 void * buffer;
35402 dword length;
35403diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35404index db87d51..7d09acf 100644
35405--- a/drivers/isdn/hardware/eicon/idifunc.c
35406+++ b/drivers/isdn/hardware/eicon/idifunc.c
35407@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35408 IDI_SYNC_REQ req;
35409 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35410
35411+ pax_track_stack();
35412+
35413 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35414
35415 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35416diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35417index ae89fb8..0fab299 100644
35418--- a/drivers/isdn/hardware/eicon/message.c
35419+++ b/drivers/isdn/hardware/eicon/message.c
35420@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35421 dword d;
35422 word w;
35423
35424+ pax_track_stack();
35425+
35426 a = plci->adapter;
35427 Id = ((word)plci->Id<<8)|a->Id;
35428 PUT_WORD(&SS_Ind[4],0x0000);
35429@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35430 word j, n, w;
35431 dword d;
35432
35433+ pax_track_stack();
35434+
35435
35436 for(i=0;i<8;i++) bp_parms[i].length = 0;
35437 for(i=0;i<2;i++) global_config[i].length = 0;
35438@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35439 const byte llc3[] = {4,3,2,2,6,6,0};
35440 const byte header[] = {0,2,3,3,0,0,0};
35441
35442+ pax_track_stack();
35443+
35444 for(i=0;i<8;i++) bp_parms[i].length = 0;
35445 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35446 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35447@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35448 word appl_number_group_type[MAX_APPL];
35449 PLCI *auxplci;
35450
35451+ pax_track_stack();
35452+
35453 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35454
35455 if(!a->group_optimization_enabled)
35456diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35457index a564b75..f3cf8b5 100644
35458--- a/drivers/isdn/hardware/eicon/mntfunc.c
35459+++ b/drivers/isdn/hardware/eicon/mntfunc.c
35460@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35461 IDI_SYNC_REQ req;
35462 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35463
35464+ pax_track_stack();
35465+
35466 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35467
35468 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35469diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35470index a3bd163..8956575 100644
35471--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35472+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35473@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35474 typedef struct _diva_os_idi_adapter_interface {
35475 diva_init_card_proc_t cleanup_adapter_proc;
35476 diva_cmd_card_proc_t cmd_proc;
35477-} diva_os_idi_adapter_interface_t;
35478+} __no_const diva_os_idi_adapter_interface_t;
35479
35480 typedef struct _diva_os_xdi_adapter {
35481 struct list_head link;
35482diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35483index adb1e8c..21b590b 100644
35484--- a/drivers/isdn/i4l/isdn_common.c
35485+++ b/drivers/isdn/i4l/isdn_common.c
35486@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35487 } iocpar;
35488 void __user *argp = (void __user *)arg;
35489
35490+ pax_track_stack();
35491+
35492 #define name iocpar.name
35493 #define bname iocpar.bname
35494 #define iocts iocpar.iocts
35495diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35496index bf7997a..cf091db 100644
35497--- a/drivers/isdn/icn/icn.c
35498+++ b/drivers/isdn/icn/icn.c
35499@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35500 if (count > len)
35501 count = len;
35502 if (user) {
35503- if (copy_from_user(msg, buf, count))
35504+ if (count > sizeof msg || copy_from_user(msg, buf, count))
35505 return -EFAULT;
35506 } else
35507 memcpy(msg, buf, count);
35508diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35509index feb0fa4..f76f830 100644
35510--- a/drivers/isdn/mISDN/socket.c
35511+++ b/drivers/isdn/mISDN/socket.c
35512@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35513 if (dev) {
35514 struct mISDN_devinfo di;
35515
35516+ memset(&di, 0, sizeof(di));
35517 di.id = dev->id;
35518 di.Dprotocols = dev->Dprotocols;
35519 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35520@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35521 if (dev) {
35522 struct mISDN_devinfo di;
35523
35524+ memset(&di, 0, sizeof(di));
35525 di.id = dev->id;
35526 di.Dprotocols = dev->Dprotocols;
35527 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35528diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35529index 485be8b..f0225bc 100644
35530--- a/drivers/isdn/sc/interrupt.c
35531+++ b/drivers/isdn/sc/interrupt.c
35532@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35533 }
35534 else if(callid>=0x0000 && callid<=0x7FFF)
35535 {
35536+ int len;
35537+
35538 pr_debug("%s: Got Incoming Call\n",
35539 sc_adapter[card]->devicename);
35540- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35541- strcpy(setup.eazmsn,
35542- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35543+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35544+ sizeof(setup.phone));
35545+ if (len >= sizeof(setup.phone))
35546+ continue;
35547+ len = strlcpy(setup.eazmsn,
35548+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35549+ sizeof(setup.eazmsn));
35550+ if (len >= sizeof(setup.eazmsn))
35551+ continue;
35552 setup.si1 = 7;
35553 setup.si2 = 0;
35554 setup.plan = 0;
35555@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35556 * Handle a GetMyNumber Rsp
35557 */
35558 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35559- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35560+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35561+ rcvmsg.msg_data.byte_array,
35562+ sizeof(rcvmsg.msg_data.byte_array));
35563 continue;
35564 }
35565
35566diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35567index 8744d24..d1f9a9a 100644
35568--- a/drivers/lguest/core.c
35569+++ b/drivers/lguest/core.c
35570@@ -91,9 +91,17 @@ static __init int map_switcher(void)
35571 * it's worked so far. The end address needs +1 because __get_vm_area
35572 * allocates an extra guard page, so we need space for that.
35573 */
35574+
35575+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35576+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35577+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35578+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35579+#else
35580 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35581 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35582 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35583+#endif
35584+
35585 if (!switcher_vma) {
35586 err = -ENOMEM;
35587 printk("lguest: could not map switcher pages high\n");
35588@@ -118,7 +126,7 @@ static __init int map_switcher(void)
35589 * Now the Switcher is mapped at the right address, we can't fail!
35590 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35591 */
35592- memcpy(switcher_vma->addr, start_switcher_text,
35593+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35594 end_switcher_text - start_switcher_text);
35595
35596 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35597diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35598index 6ae3888..8b38145 100644
35599--- a/drivers/lguest/x86/core.c
35600+++ b/drivers/lguest/x86/core.c
35601@@ -59,7 +59,7 @@ static struct {
35602 /* Offset from where switcher.S was compiled to where we've copied it */
35603 static unsigned long switcher_offset(void)
35604 {
35605- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35606+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35607 }
35608
35609 /* This cpu's struct lguest_pages. */
35610@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35611 * These copies are pretty cheap, so we do them unconditionally: */
35612 /* Save the current Host top-level page directory.
35613 */
35614+
35615+#ifdef CONFIG_PAX_PER_CPU_PGD
35616+ pages->state.host_cr3 = read_cr3();
35617+#else
35618 pages->state.host_cr3 = __pa(current->mm->pgd);
35619+#endif
35620+
35621 /*
35622 * Set up the Guest's page tables to see this CPU's pages (and no
35623 * other CPU's pages).
35624@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35625 * compiled-in switcher code and the high-mapped copy we just made.
35626 */
35627 for (i = 0; i < IDT_ENTRIES; i++)
35628- default_idt_entries[i] += switcher_offset();
35629+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35630
35631 /*
35632 * Set up the Switcher's per-cpu areas.
35633@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
35634 * it will be undisturbed when we switch. To change %cs and jump we
35635 * need this structure to feed to Intel's "lcall" instruction.
35636 */
35637- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35638+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35639 lguest_entry.segment = LGUEST_CS;
35640
35641 /*
35642diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
35643index 40634b0..4f5855e 100644
35644--- a/drivers/lguest/x86/switcher_32.S
35645+++ b/drivers/lguest/x86/switcher_32.S
35646@@ -87,6 +87,7 @@
35647 #include <asm/page.h>
35648 #include <asm/segment.h>
35649 #include <asm/lguest.h>
35650+#include <asm/processor-flags.h>
35651
35652 // We mark the start of the code to copy
35653 // It's placed in .text tho it's never run here
35654@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
35655 // Changes type when we load it: damn Intel!
35656 // For after we switch over our page tables
35657 // That entry will be read-only: we'd crash.
35658+
35659+#ifdef CONFIG_PAX_KERNEXEC
35660+ mov %cr0, %edx
35661+ xor $X86_CR0_WP, %edx
35662+ mov %edx, %cr0
35663+#endif
35664+
35665 movl $(GDT_ENTRY_TSS*8), %edx
35666 ltr %dx
35667
35668@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
35669 // Let's clear it again for our return.
35670 // The GDT descriptor of the Host
35671 // Points to the table after two "size" bytes
35672- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
35673+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
35674 // Clear "used" from type field (byte 5, bit 2)
35675- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
35676+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
35677+
35678+#ifdef CONFIG_PAX_KERNEXEC
35679+ mov %cr0, %eax
35680+ xor $X86_CR0_WP, %eax
35681+ mov %eax, %cr0
35682+#endif
35683
35684 // Once our page table's switched, the Guest is live!
35685 // The Host fades as we run this final step.
35686@@ -295,13 +309,12 @@ deliver_to_host:
35687 // I consulted gcc, and it gave
35688 // These instructions, which I gladly credit:
35689 leal (%edx,%ebx,8), %eax
35690- movzwl (%eax),%edx
35691- movl 4(%eax), %eax
35692- xorw %ax, %ax
35693- orl %eax, %edx
35694+ movl 4(%eax), %edx
35695+ movw (%eax), %dx
35696 // Now the address of the handler's in %edx
35697 // We call it now: its "iret" drops us home.
35698- jmp *%edx
35699+ ljmp $__KERNEL_CS, $1f
35700+1: jmp *%edx
35701
35702 // Every interrupt can come to us here
35703 // But we must truly tell each apart.
35704diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
35705index 588a5b0..b71db89 100644
35706--- a/drivers/macintosh/macio_asic.c
35707+++ b/drivers/macintosh/macio_asic.c
35708@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
35709 * MacIO is matched against any Apple ID, it's probe() function
35710 * will then decide wether it applies or not
35711 */
35712-static const struct pci_device_id __devinitdata pci_ids [] = { {
35713+static const struct pci_device_id __devinitconst pci_ids [] = { {
35714 .vendor = PCI_VENDOR_ID_APPLE,
35715 .device = PCI_ANY_ID,
35716 .subvendor = PCI_ANY_ID,
35717diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
35718index a348bb0..ecd9b3f 100644
35719--- a/drivers/macintosh/via-pmu-backlight.c
35720+++ b/drivers/macintosh/via-pmu-backlight.c
35721@@ -15,7 +15,7 @@
35722
35723 #define MAX_PMU_LEVEL 0xFF
35724
35725-static struct backlight_ops pmu_backlight_data;
35726+static const struct backlight_ops pmu_backlight_data;
35727 static DEFINE_SPINLOCK(pmu_backlight_lock);
35728 static int sleeping, uses_pmu_bl;
35729 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
35730@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
35731 return bd->props.brightness;
35732 }
35733
35734-static struct backlight_ops pmu_backlight_data = {
35735+static const struct backlight_ops pmu_backlight_data = {
35736 .get_brightness = pmu_backlight_get_brightness,
35737 .update_status = pmu_backlight_update_status,
35738
35739diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
35740index 6f308a4..b5f7ff7 100644
35741--- a/drivers/macintosh/via-pmu.c
35742+++ b/drivers/macintosh/via-pmu.c
35743@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
35744 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
35745 }
35746
35747-static struct platform_suspend_ops pmu_pm_ops = {
35748+static const struct platform_suspend_ops pmu_pm_ops = {
35749 .enter = powerbook_sleep,
35750 .valid = pmu_sleep_valid,
35751 };
35752diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
35753index 818b617..4656e38 100644
35754--- a/drivers/md/dm-ioctl.c
35755+++ b/drivers/md/dm-ioctl.c
35756@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
35757 cmd == DM_LIST_VERSIONS_CMD)
35758 return 0;
35759
35760- if ((cmd == DM_DEV_CREATE_CMD)) {
35761+ if (cmd == DM_DEV_CREATE_CMD) {
35762 if (!*param->name) {
35763 DMWARN("name not supplied when creating device");
35764 return -EINVAL;
35765diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
35766index 6021d0a..a878643 100644
35767--- a/drivers/md/dm-raid1.c
35768+++ b/drivers/md/dm-raid1.c
35769@@ -41,7 +41,7 @@ enum dm_raid1_error {
35770
35771 struct mirror {
35772 struct mirror_set *ms;
35773- atomic_t error_count;
35774+ atomic_unchecked_t error_count;
35775 unsigned long error_type;
35776 struct dm_dev *dev;
35777 sector_t offset;
35778@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35779 * simple way to tell if a device has encountered
35780 * errors.
35781 */
35782- atomic_inc(&m->error_count);
35783+ atomic_inc_unchecked(&m->error_count);
35784
35785 if (test_and_set_bit(error_type, &m->error_type))
35786 return;
35787@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35788 }
35789
35790 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
35791- if (!atomic_read(&new->error_count)) {
35792+ if (!atomic_read_unchecked(&new->error_count)) {
35793 set_default_mirror(new);
35794 break;
35795 }
35796@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
35797 struct mirror *m = get_default_mirror(ms);
35798
35799 do {
35800- if (likely(!atomic_read(&m->error_count)))
35801+ if (likely(!atomic_read_unchecked(&m->error_count)))
35802 return m;
35803
35804 if (m-- == ms->mirror)
35805@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
35806 {
35807 struct mirror *default_mirror = get_default_mirror(m->ms);
35808
35809- return !atomic_read(&default_mirror->error_count);
35810+ return !atomic_read_unchecked(&default_mirror->error_count);
35811 }
35812
35813 static int mirror_available(struct mirror_set *ms, struct bio *bio)
35814@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
35815 */
35816 if (likely(region_in_sync(ms, region, 1)))
35817 m = choose_mirror(ms, bio->bi_sector);
35818- else if (m && atomic_read(&m->error_count))
35819+ else if (m && atomic_read_unchecked(&m->error_count))
35820 m = NULL;
35821
35822 if (likely(m))
35823@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
35824 }
35825
35826 ms->mirror[mirror].ms = ms;
35827- atomic_set(&(ms->mirror[mirror].error_count), 0);
35828+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
35829 ms->mirror[mirror].error_type = 0;
35830 ms->mirror[mirror].offset = offset;
35831
35832@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
35833 */
35834 static char device_status_char(struct mirror *m)
35835 {
35836- if (!atomic_read(&(m->error_count)))
35837+ if (!atomic_read_unchecked(&(m->error_count)))
35838 return 'A';
35839
35840 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
35841diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
35842index bd58703..9f26571 100644
35843--- a/drivers/md/dm-stripe.c
35844+++ b/drivers/md/dm-stripe.c
35845@@ -20,7 +20,7 @@ struct stripe {
35846 struct dm_dev *dev;
35847 sector_t physical_start;
35848
35849- atomic_t error_count;
35850+ atomic_unchecked_t error_count;
35851 };
35852
35853 struct stripe_c {
35854@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
35855 kfree(sc);
35856 return r;
35857 }
35858- atomic_set(&(sc->stripe[i].error_count), 0);
35859+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
35860 }
35861
35862 ti->private = sc;
35863@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
35864 DMEMIT("%d ", sc->stripes);
35865 for (i = 0; i < sc->stripes; i++) {
35866 DMEMIT("%s ", sc->stripe[i].dev->name);
35867- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
35868+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
35869 'D' : 'A';
35870 }
35871 buffer[i] = '\0';
35872@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
35873 */
35874 for (i = 0; i < sc->stripes; i++)
35875 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
35876- atomic_inc(&(sc->stripe[i].error_count));
35877- if (atomic_read(&(sc->stripe[i].error_count)) <
35878+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
35879+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
35880 DM_IO_ERROR_THRESHOLD)
35881 queue_work(kstriped, &sc->kstriped_ws);
35882 }
35883diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
35884index 4b04590..13a77b2 100644
35885--- a/drivers/md/dm-sysfs.c
35886+++ b/drivers/md/dm-sysfs.c
35887@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
35888 NULL,
35889 };
35890
35891-static struct sysfs_ops dm_sysfs_ops = {
35892+static const struct sysfs_ops dm_sysfs_ops = {
35893 .show = dm_attr_show,
35894 };
35895
35896diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
35897index 03345bb..332250d 100644
35898--- a/drivers/md/dm-table.c
35899+++ b/drivers/md/dm-table.c
35900@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
35901 if (!dev_size)
35902 return 0;
35903
35904- if ((start >= dev_size) || (start + len > dev_size)) {
35905+ if ((start >= dev_size) || (len > dev_size - start)) {
35906 DMWARN("%s: %s too small for target: "
35907 "start=%llu, len=%llu, dev_size=%llu",
35908 dm_device_name(ti->table->md), bdevname(bdev, b),
35909diff --git a/drivers/md/dm.c b/drivers/md/dm.c
35910index c988ac2..c418141 100644
35911--- a/drivers/md/dm.c
35912+++ b/drivers/md/dm.c
35913@@ -165,9 +165,9 @@ struct mapped_device {
35914 /*
35915 * Event handling.
35916 */
35917- atomic_t event_nr;
35918+ atomic_unchecked_t event_nr;
35919 wait_queue_head_t eventq;
35920- atomic_t uevent_seq;
35921+ atomic_unchecked_t uevent_seq;
35922 struct list_head uevent_list;
35923 spinlock_t uevent_lock; /* Protect access to uevent_list */
35924
35925@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
35926 rwlock_init(&md->map_lock);
35927 atomic_set(&md->holders, 1);
35928 atomic_set(&md->open_count, 0);
35929- atomic_set(&md->event_nr, 0);
35930- atomic_set(&md->uevent_seq, 0);
35931+ atomic_set_unchecked(&md->event_nr, 0);
35932+ atomic_set_unchecked(&md->uevent_seq, 0);
35933 INIT_LIST_HEAD(&md->uevent_list);
35934 spin_lock_init(&md->uevent_lock);
35935
35936@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
35937
35938 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
35939
35940- atomic_inc(&md->event_nr);
35941+ atomic_inc_unchecked(&md->event_nr);
35942 wake_up(&md->eventq);
35943 }
35944
35945@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
35946
35947 uint32_t dm_next_uevent_seq(struct mapped_device *md)
35948 {
35949- return atomic_add_return(1, &md->uevent_seq);
35950+ return atomic_add_return_unchecked(1, &md->uevent_seq);
35951 }
35952
35953 uint32_t dm_get_event_nr(struct mapped_device *md)
35954 {
35955- return atomic_read(&md->event_nr);
35956+ return atomic_read_unchecked(&md->event_nr);
35957 }
35958
35959 int dm_wait_event(struct mapped_device *md, int event_nr)
35960 {
35961 return wait_event_interruptible(md->eventq,
35962- (event_nr != atomic_read(&md->event_nr)));
35963+ (event_nr != atomic_read_unchecked(&md->event_nr)));
35964 }
35965
35966 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
35967diff --git a/drivers/md/md.c b/drivers/md/md.c
35968index 4ce6e2f..7a9530a 100644
35969--- a/drivers/md/md.c
35970+++ b/drivers/md/md.c
35971@@ -153,10 +153,10 @@ static int start_readonly;
35972 * start build, activate spare
35973 */
35974 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
35975-static atomic_t md_event_count;
35976+static atomic_unchecked_t md_event_count;
35977 void md_new_event(mddev_t *mddev)
35978 {
35979- atomic_inc(&md_event_count);
35980+ atomic_inc_unchecked(&md_event_count);
35981 wake_up(&md_event_waiters);
35982 }
35983 EXPORT_SYMBOL_GPL(md_new_event);
35984@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
35985 */
35986 static void md_new_event_inintr(mddev_t *mddev)
35987 {
35988- atomic_inc(&md_event_count);
35989+ atomic_inc_unchecked(&md_event_count);
35990 wake_up(&md_event_waiters);
35991 }
35992
35993@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
35994
35995 rdev->preferred_minor = 0xffff;
35996 rdev->data_offset = le64_to_cpu(sb->data_offset);
35997- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35998+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35999
36000 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36001 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36002@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36003 else
36004 sb->resync_offset = cpu_to_le64(0);
36005
36006- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36007+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36008
36009 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36010 sb->size = cpu_to_le64(mddev->dev_sectors);
36011@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36012 static ssize_t
36013 errors_show(mdk_rdev_t *rdev, char *page)
36014 {
36015- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36016+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36017 }
36018
36019 static ssize_t
36020@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36021 char *e;
36022 unsigned long n = simple_strtoul(buf, &e, 10);
36023 if (*buf && (*e == 0 || *e == '\n')) {
36024- atomic_set(&rdev->corrected_errors, n);
36025+ atomic_set_unchecked(&rdev->corrected_errors, n);
36026 return len;
36027 }
36028 return -EINVAL;
36029@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36030 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36031 kfree(rdev);
36032 }
36033-static struct sysfs_ops rdev_sysfs_ops = {
36034+static const struct sysfs_ops rdev_sysfs_ops = {
36035 .show = rdev_attr_show,
36036 .store = rdev_attr_store,
36037 };
36038@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36039 rdev->data_offset = 0;
36040 rdev->sb_events = 0;
36041 atomic_set(&rdev->nr_pending, 0);
36042- atomic_set(&rdev->read_errors, 0);
36043- atomic_set(&rdev->corrected_errors, 0);
36044+ atomic_set_unchecked(&rdev->read_errors, 0);
36045+ atomic_set_unchecked(&rdev->corrected_errors, 0);
36046
36047 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36048 if (!size) {
36049@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36050 kfree(mddev);
36051 }
36052
36053-static struct sysfs_ops md_sysfs_ops = {
36054+static const struct sysfs_ops md_sysfs_ops = {
36055 .show = md_attr_show,
36056 .store = md_attr_store,
36057 };
36058@@ -4482,7 +4482,8 @@ out:
36059 err = 0;
36060 blk_integrity_unregister(disk);
36061 md_new_event(mddev);
36062- sysfs_notify_dirent(mddev->sysfs_state);
36063+ if (mddev->sysfs_state)
36064+ sysfs_notify_dirent(mddev->sysfs_state);
36065 return err;
36066 }
36067
36068@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36069
36070 spin_unlock(&pers_lock);
36071 seq_printf(seq, "\n");
36072- mi->event = atomic_read(&md_event_count);
36073+ mi->event = atomic_read_unchecked(&md_event_count);
36074 return 0;
36075 }
36076 if (v == (void*)2) {
36077@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36078 chunk_kb ? "KB" : "B");
36079 if (bitmap->file) {
36080 seq_printf(seq, ", file: ");
36081- seq_path(seq, &bitmap->file->f_path, " \t\n");
36082+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36083 }
36084
36085 seq_printf(seq, "\n");
36086@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36087 else {
36088 struct seq_file *p = file->private_data;
36089 p->private = mi;
36090- mi->event = atomic_read(&md_event_count);
36091+ mi->event = atomic_read_unchecked(&md_event_count);
36092 }
36093 return error;
36094 }
36095@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36096 /* always allow read */
36097 mask = POLLIN | POLLRDNORM;
36098
36099- if (mi->event != atomic_read(&md_event_count))
36100+ if (mi->event != atomic_read_unchecked(&md_event_count))
36101 mask |= POLLERR | POLLPRI;
36102 return mask;
36103 }
36104@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36105 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36106 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36107 (int)part_stat_read(&disk->part0, sectors[1]) -
36108- atomic_read(&disk->sync_io);
36109+ atomic_read_unchecked(&disk->sync_io);
36110 /* sync IO will cause sync_io to increase before the disk_stats
36111 * as sync_io is counted when a request starts, and
36112 * disk_stats is counted when it completes.
36113diff --git a/drivers/md/md.h b/drivers/md/md.h
36114index 87430fe..0024a4c 100644
36115--- a/drivers/md/md.h
36116+++ b/drivers/md/md.h
36117@@ -94,10 +94,10 @@ struct mdk_rdev_s
36118 * only maintained for arrays that
36119 * support hot removal
36120 */
36121- atomic_t read_errors; /* number of consecutive read errors that
36122+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
36123 * we have tried to ignore.
36124 */
36125- atomic_t corrected_errors; /* number of corrected read errors,
36126+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36127 * for reporting to userspace and storing
36128 * in superblock.
36129 */
36130@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36131
36132 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36133 {
36134- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36135+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36136 }
36137
36138 struct mdk_personality
36139diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36140index 968cb14..f0ad2e4 100644
36141--- a/drivers/md/raid1.c
36142+++ b/drivers/md/raid1.c
36143@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36144 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36145 continue;
36146 rdev = conf->mirrors[d].rdev;
36147- atomic_add(s, &rdev->corrected_errors);
36148+ atomic_add_unchecked(s, &rdev->corrected_errors);
36149 if (sync_page_io(rdev->bdev,
36150 sect + rdev->data_offset,
36151 s<<9,
36152@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36153 /* Well, this device is dead */
36154 md_error(mddev, rdev);
36155 else {
36156- atomic_add(s, &rdev->corrected_errors);
36157+ atomic_add_unchecked(s, &rdev->corrected_errors);
36158 printk(KERN_INFO
36159 "raid1:%s: read error corrected "
36160 "(%d sectors at %llu on %s)\n",
36161diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36162index 1b4e232..cf0f534 100644
36163--- a/drivers/md/raid10.c
36164+++ b/drivers/md/raid10.c
36165@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36166 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36167 set_bit(R10BIO_Uptodate, &r10_bio->state);
36168 else {
36169- atomic_add(r10_bio->sectors,
36170+ atomic_add_unchecked(r10_bio->sectors,
36171 &conf->mirrors[d].rdev->corrected_errors);
36172 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36173 md_error(r10_bio->mddev,
36174@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36175 test_bit(In_sync, &rdev->flags)) {
36176 atomic_inc(&rdev->nr_pending);
36177 rcu_read_unlock();
36178- atomic_add(s, &rdev->corrected_errors);
36179+ atomic_add_unchecked(s, &rdev->corrected_errors);
36180 if (sync_page_io(rdev->bdev,
36181 r10_bio->devs[sl].addr +
36182 sect + rdev->data_offset,
36183diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36184index 883215d..675bf47 100644
36185--- a/drivers/md/raid5.c
36186+++ b/drivers/md/raid5.c
36187@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36188 bi->bi_next = NULL;
36189 if ((rw & WRITE) &&
36190 test_bit(R5_ReWrite, &sh->dev[i].flags))
36191- atomic_add(STRIPE_SECTORS,
36192+ atomic_add_unchecked(STRIPE_SECTORS,
36193 &rdev->corrected_errors);
36194 generic_make_request(bi);
36195 } else {
36196@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36197 clear_bit(R5_ReadError, &sh->dev[i].flags);
36198 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36199 }
36200- if (atomic_read(&conf->disks[i].rdev->read_errors))
36201- atomic_set(&conf->disks[i].rdev->read_errors, 0);
36202+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36203+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36204 } else {
36205 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36206 int retry = 0;
36207 rdev = conf->disks[i].rdev;
36208
36209 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36210- atomic_inc(&rdev->read_errors);
36211+ atomic_inc_unchecked(&rdev->read_errors);
36212 if (conf->mddev->degraded >= conf->max_degraded)
36213 printk_rl(KERN_WARNING
36214 "raid5:%s: read error not correctable "
36215@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36216 (unsigned long long)(sh->sector
36217 + rdev->data_offset),
36218 bdn);
36219- else if (atomic_read(&rdev->read_errors)
36220+ else if (atomic_read_unchecked(&rdev->read_errors)
36221 > conf->max_nr_stripes)
36222 printk(KERN_WARNING
36223 "raid5:%s: Too many read errors, failing device %s.\n",
36224@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36225 sector_t r_sector;
36226 struct stripe_head sh2;
36227
36228+ pax_track_stack();
36229
36230 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36231 stripe = new_sector;
36232diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36233index 05bde9c..2f31d40 100644
36234--- a/drivers/media/common/saa7146_hlp.c
36235+++ b/drivers/media/common/saa7146_hlp.c
36236@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36237
36238 int x[32], y[32], w[32], h[32];
36239
36240+ pax_track_stack();
36241+
36242 /* clear out memory */
36243 memset(&line_list[0], 0x00, sizeof(u32)*32);
36244 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36245diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36246index cb22da5..82b686e 100644
36247--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36248+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36249@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36250 u8 buf[HOST_LINK_BUF_SIZE];
36251 int i;
36252
36253+ pax_track_stack();
36254+
36255 dprintk("%s\n", __func__);
36256
36257 /* check if we have space for a link buf in the rx_buffer */
36258@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36259 unsigned long timeout;
36260 int written;
36261
36262+ pax_track_stack();
36263+
36264 dprintk("%s\n", __func__);
36265
36266 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36267diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36268index 2fe05d0..a3289c4 100644
36269--- a/drivers/media/dvb/dvb-core/dvb_demux.h
36270+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36271@@ -71,7 +71,7 @@ struct dvb_demux_feed {
36272 union {
36273 dmx_ts_cb ts;
36274 dmx_section_cb sec;
36275- } cb;
36276+ } __no_const cb;
36277
36278 struct dvb_demux *demux;
36279 void *priv;
36280diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36281index 94159b9..376bd8e 100644
36282--- a/drivers/media/dvb/dvb-core/dvbdev.c
36283+++ b/drivers/media/dvb/dvb-core/dvbdev.c
36284@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36285 const struct dvb_device *template, void *priv, int type)
36286 {
36287 struct dvb_device *dvbdev;
36288- struct file_operations *dvbdevfops;
36289+ file_operations_no_const *dvbdevfops;
36290 struct device *clsdev;
36291 int minor;
36292 int id;
36293diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36294index 2a53dd0..db8c07a 100644
36295--- a/drivers/media/dvb/dvb-usb/cxusb.c
36296+++ b/drivers/media/dvb/dvb-usb/cxusb.c
36297@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36298 struct dib0700_adapter_state {
36299 int (*set_param_save) (struct dvb_frontend *,
36300 struct dvb_frontend_parameters *);
36301-};
36302+} __no_const;
36303
36304 static int dib7070_set_param_override(struct dvb_frontend *fe,
36305 struct dvb_frontend_parameters *fep)
36306diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36307index db7f7f7..f55e96f 100644
36308--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36309+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36310@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36311
36312 u8 buf[260];
36313
36314+ pax_track_stack();
36315+
36316 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36317 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36318
36319diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36320index 524acf5..5ffc403 100644
36321--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36322+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36323@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36324
36325 struct dib0700_adapter_state {
36326 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36327-};
36328+} __no_const;
36329
36330 /* Hauppauge Nova-T 500 (aka Bristol)
36331 * has a LNA on GPIO0 which is enabled by setting 1 */
36332diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36333index ba91735..4261d84 100644
36334--- a/drivers/media/dvb/frontends/dib3000.h
36335+++ b/drivers/media/dvb/frontends/dib3000.h
36336@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36337 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36338 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36339 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36340-};
36341+} __no_const;
36342
36343 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36344 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36345diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36346index c709ce6..b3fe620 100644
36347--- a/drivers/media/dvb/frontends/or51211.c
36348+++ b/drivers/media/dvb/frontends/or51211.c
36349@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36350 u8 tudata[585];
36351 int i;
36352
36353+ pax_track_stack();
36354+
36355 dprintk("Firmware is %zd bytes\n",fw->size);
36356
36357 /* Get eprom data */
36358diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36359index 482d0f3..ee1e202 100644
36360--- a/drivers/media/radio/radio-cadet.c
36361+++ b/drivers/media/radio/radio-cadet.c
36362@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36363 while (i < count && dev->rdsin != dev->rdsout)
36364 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36365
36366- if (copy_to_user(data, readbuf, i))
36367+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36368 return -EFAULT;
36369 return i;
36370 }
36371diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36372index 6dd51e2..0359b92 100644
36373--- a/drivers/media/video/cx18/cx18-driver.c
36374+++ b/drivers/media/video/cx18/cx18-driver.c
36375@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36376
36377 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36378
36379-static atomic_t cx18_instance = ATOMIC_INIT(0);
36380+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36381
36382 /* Parameter declarations */
36383 static int cardtype[CX18_MAX_CARDS];
36384@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36385 struct i2c_client c;
36386 u8 eedata[256];
36387
36388+ pax_track_stack();
36389+
36390 memset(&c, 0, sizeof(c));
36391 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36392 c.adapter = &cx->i2c_adap[0];
36393@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36394 struct cx18 *cx;
36395
36396 /* FIXME - module parameter arrays constrain max instances */
36397- i = atomic_inc_return(&cx18_instance) - 1;
36398+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36399 if (i >= CX18_MAX_CARDS) {
36400 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36401 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36402diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36403index 463ec34..2f4625a 100644
36404--- a/drivers/media/video/ivtv/ivtv-driver.c
36405+++ b/drivers/media/video/ivtv/ivtv-driver.c
36406@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36407 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36408
36409 /* ivtv instance counter */
36410-static atomic_t ivtv_instance = ATOMIC_INIT(0);
36411+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36412
36413 /* Parameter declarations */
36414 static int cardtype[IVTV_MAX_CARDS];
36415diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36416index 5fc4ac0..652a54a 100644
36417--- a/drivers/media/video/omap24xxcam.c
36418+++ b/drivers/media/video/omap24xxcam.c
36419@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36420 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36421
36422 do_gettimeofday(&vb->ts);
36423- vb->field_count = atomic_add_return(2, &fh->field_count);
36424+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36425 if (csr & csr_error) {
36426 vb->state = VIDEOBUF_ERROR;
36427 if (!atomic_read(&fh->cam->in_reset)) {
36428diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36429index 2ce67f5..cf26a5b 100644
36430--- a/drivers/media/video/omap24xxcam.h
36431+++ b/drivers/media/video/omap24xxcam.h
36432@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36433 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36434 struct videobuf_queue vbq;
36435 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36436- atomic_t field_count; /* field counter for videobuf_buffer */
36437+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36438 /* accessing cam here doesn't need serialisation: it's constant */
36439 struct omap24xxcam_device *cam;
36440 };
36441diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36442index 299afa4..eb47459 100644
36443--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36444+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36445@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36446 u8 *eeprom;
36447 struct tveeprom tvdata;
36448
36449+ pax_track_stack();
36450+
36451 memset(&tvdata,0,sizeof(tvdata));
36452
36453 eeprom = pvr2_eeprom_fetch(hdw);
36454diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36455index 5b152ff..3320638 100644
36456--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36457+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36458@@ -195,7 +195,7 @@ struct pvr2_hdw {
36459
36460 /* I2C stuff */
36461 struct i2c_adapter i2c_adap;
36462- struct i2c_algorithm i2c_algo;
36463+ i2c_algorithm_no_const i2c_algo;
36464 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36465 int i2c_cx25840_hack_state;
36466 int i2c_linked;
36467diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36468index 1eabff6..8e2313a 100644
36469--- a/drivers/media/video/saa7134/saa6752hs.c
36470+++ b/drivers/media/video/saa7134/saa6752hs.c
36471@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36472 unsigned char localPAT[256];
36473 unsigned char localPMT[256];
36474
36475+ pax_track_stack();
36476+
36477 /* Set video format - must be done first as it resets other settings */
36478 set_reg8(client, 0x41, h->video_format);
36479
36480diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36481index 9c1d3ac..b1b49e9 100644
36482--- a/drivers/media/video/saa7164/saa7164-cmd.c
36483+++ b/drivers/media/video/saa7164/saa7164-cmd.c
36484@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36485 wait_queue_head_t *q = 0;
36486 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36487
36488+ pax_track_stack();
36489+
36490 /* While any outstand message on the bus exists... */
36491 do {
36492
36493@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36494 u8 tmp[512];
36495 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36496
36497+ pax_track_stack();
36498+
36499 while (loop) {
36500
36501 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36502diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36503index b085496..cde0270 100644
36504--- a/drivers/media/video/usbvideo/ibmcam.c
36505+++ b/drivers/media/video/usbvideo/ibmcam.c
36506@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36507 static int __init ibmcam_init(void)
36508 {
36509 struct usbvideo_cb cbTbl;
36510- memset(&cbTbl, 0, sizeof(cbTbl));
36511- cbTbl.probe = ibmcam_probe;
36512- cbTbl.setupOnOpen = ibmcam_setup_on_open;
36513- cbTbl.videoStart = ibmcam_video_start;
36514- cbTbl.videoStop = ibmcam_video_stop;
36515- cbTbl.processData = ibmcam_ProcessIsocData;
36516- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36517- cbTbl.adjustPicture = ibmcam_adjust_picture;
36518- cbTbl.getFPS = ibmcam_calculate_fps;
36519+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
36520+ *(void **)&cbTbl.probe = ibmcam_probe;
36521+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36522+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
36523+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36524+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36525+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36526+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36527+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36528 return usbvideo_register(
36529 &cams,
36530 MAX_IBMCAM,
36531diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36532index 31d57f2..600b735 100644
36533--- a/drivers/media/video/usbvideo/konicawc.c
36534+++ b/drivers/media/video/usbvideo/konicawc.c
36535@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36536 int error;
36537
36538 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36539- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36540+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36541
36542 cam->input = input_dev = input_allocate_device();
36543 if (!input_dev) {
36544@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36545 struct usbvideo_cb cbTbl;
36546 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36547 DRIVER_DESC "\n");
36548- memset(&cbTbl, 0, sizeof(cbTbl));
36549- cbTbl.probe = konicawc_probe;
36550- cbTbl.setupOnOpen = konicawc_setup_on_open;
36551- cbTbl.processData = konicawc_process_isoc;
36552- cbTbl.getFPS = konicawc_calculate_fps;
36553- cbTbl.setVideoMode = konicawc_set_video_mode;
36554- cbTbl.startDataPump = konicawc_start_data;
36555- cbTbl.stopDataPump = konicawc_stop_data;
36556- cbTbl.adjustPicture = konicawc_adjust_picture;
36557- cbTbl.userFree = konicawc_free_uvd;
36558+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
36559+ *(void **)&cbTbl.probe = konicawc_probe;
36560+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36561+ *(void **)&cbTbl.processData = konicawc_process_isoc;
36562+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36563+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36564+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
36565+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36566+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36567+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
36568 return usbvideo_register(
36569 &cams,
36570 MAX_CAMERAS,
36571diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36572index 803d3e4..c4d1b96 100644
36573--- a/drivers/media/video/usbvideo/quickcam_messenger.c
36574+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36575@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36576 int error;
36577
36578 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36579- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36580+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36581
36582 cam->input = input_dev = input_allocate_device();
36583 if (!input_dev) {
36584diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36585index fbd1b63..292f9f0 100644
36586--- a/drivers/media/video/usbvideo/ultracam.c
36587+++ b/drivers/media/video/usbvideo/ultracam.c
36588@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36589 {
36590 struct usbvideo_cb cbTbl;
36591 memset(&cbTbl, 0, sizeof(cbTbl));
36592- cbTbl.probe = ultracam_probe;
36593- cbTbl.setupOnOpen = ultracam_setup_on_open;
36594- cbTbl.videoStart = ultracam_video_start;
36595- cbTbl.videoStop = ultracam_video_stop;
36596- cbTbl.processData = ultracam_ProcessIsocData;
36597- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36598- cbTbl.adjustPicture = ultracam_adjust_picture;
36599- cbTbl.getFPS = ultracam_calculate_fps;
36600+ *(void **)&cbTbl.probe = ultracam_probe;
36601+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36602+ *(void **)&cbTbl.videoStart = ultracam_video_start;
36603+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
36604+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36605+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36606+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36607+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36608 return usbvideo_register(
36609 &cams,
36610 MAX_CAMERAS,
36611diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36612index dea8b32..34f6878 100644
36613--- a/drivers/media/video/usbvideo/usbvideo.c
36614+++ b/drivers/media/video/usbvideo/usbvideo.c
36615@@ -697,15 +697,15 @@ int usbvideo_register(
36616 __func__, cams, base_size, num_cams);
36617
36618 /* Copy callbacks, apply defaults for those that are not set */
36619- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36620+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36621 if (cams->cb.getFrame == NULL)
36622- cams->cb.getFrame = usbvideo_GetFrame;
36623+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36624 if (cams->cb.disconnect == NULL)
36625- cams->cb.disconnect = usbvideo_Disconnect;
36626+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36627 if (cams->cb.startDataPump == NULL)
36628- cams->cb.startDataPump = usbvideo_StartDataPump;
36629+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
36630 if (cams->cb.stopDataPump == NULL)
36631- cams->cb.stopDataPump = usbvideo_StopDataPump;
36632+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
36633
36634 cams->num_cameras = num_cams;
36635 cams->cam = (struct uvd *) &cams[1];
36636diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
36637index c66985b..7fa143a 100644
36638--- a/drivers/media/video/usbvideo/usbvideo.h
36639+++ b/drivers/media/video/usbvideo/usbvideo.h
36640@@ -268,7 +268,7 @@ struct usbvideo_cb {
36641 int (*startDataPump)(struct uvd *uvd);
36642 void (*stopDataPump)(struct uvd *uvd);
36643 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
36644-};
36645+} __no_const;
36646
36647 struct usbvideo {
36648 int num_cameras; /* As allocated */
36649diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
36650index e0f91e4..37554ea 100644
36651--- a/drivers/media/video/usbvision/usbvision-core.c
36652+++ b/drivers/media/video/usbvision/usbvision-core.c
36653@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
36654 unsigned char rv, gv, bv;
36655 static unsigned char *Y, *U, *V;
36656
36657+ pax_track_stack();
36658+
36659 frame = usbvision->curFrame;
36660 imageSize = frame->frmwidth * frame->frmheight;
36661 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
36662diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
36663index 0d06e7c..3d17d24 100644
36664--- a/drivers/media/video/v4l2-device.c
36665+++ b/drivers/media/video/v4l2-device.c
36666@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
36667 EXPORT_SYMBOL_GPL(v4l2_device_register);
36668
36669 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
36670- atomic_t *instance)
36671+ atomic_unchecked_t *instance)
36672 {
36673- int num = atomic_inc_return(instance) - 1;
36674+ int num = atomic_inc_return_unchecked(instance) - 1;
36675 int len = strlen(basename);
36676
36677 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
36678diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
36679index 032ebae..6a3532c 100644
36680--- a/drivers/media/video/videobuf-dma-sg.c
36681+++ b/drivers/media/video/videobuf-dma-sg.c
36682@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
36683 {
36684 struct videobuf_queue q;
36685
36686+ pax_track_stack();
36687+
36688 /* Required to make generic handler to call __videobuf_alloc */
36689 q.int_ops = &sg_ops;
36690
36691diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
36692index b6992b7..9fa7547 100644
36693--- a/drivers/message/fusion/mptbase.c
36694+++ b/drivers/message/fusion/mptbase.c
36695@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
36696 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
36697 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
36698
36699+#ifdef CONFIG_GRKERNSEC_HIDESYM
36700+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36701+ NULL, NULL);
36702+#else
36703 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36704 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
36705+#endif
36706+
36707 /*
36708 * Rounding UP to nearest 4-kB boundary here...
36709 */
36710diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
36711index 83873e3..e360e9a 100644
36712--- a/drivers/message/fusion/mptsas.c
36713+++ b/drivers/message/fusion/mptsas.c
36714@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
36715 return 0;
36716 }
36717
36718+static inline void
36719+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36720+{
36721+ if (phy_info->port_details) {
36722+ phy_info->port_details->rphy = rphy;
36723+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36724+ ioc->name, rphy));
36725+ }
36726+
36727+ if (rphy) {
36728+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36729+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36730+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36731+ ioc->name, rphy, rphy->dev.release));
36732+ }
36733+}
36734+
36735 /* no mutex */
36736 static void
36737 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
36738@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
36739 return NULL;
36740 }
36741
36742-static inline void
36743-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36744-{
36745- if (phy_info->port_details) {
36746- phy_info->port_details->rphy = rphy;
36747- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36748- ioc->name, rphy));
36749- }
36750-
36751- if (rphy) {
36752- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36753- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36754- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36755- ioc->name, rphy, rphy->dev.release));
36756- }
36757-}
36758-
36759 static inline struct sas_port *
36760 mptsas_get_port(struct mptsas_phyinfo *phy_info)
36761 {
36762diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
36763index bd096ca..332cf76 100644
36764--- a/drivers/message/fusion/mptscsih.c
36765+++ b/drivers/message/fusion/mptscsih.c
36766@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
36767
36768 h = shost_priv(SChost);
36769
36770- if (h) {
36771- if (h->info_kbuf == NULL)
36772- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36773- return h->info_kbuf;
36774- h->info_kbuf[0] = '\0';
36775+ if (!h)
36776+ return NULL;
36777
36778- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36779- h->info_kbuf[size-1] = '\0';
36780- }
36781+ if (h->info_kbuf == NULL)
36782+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36783+ return h->info_kbuf;
36784+ h->info_kbuf[0] = '\0';
36785+
36786+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36787+ h->info_kbuf[size-1] = '\0';
36788
36789 return h->info_kbuf;
36790 }
36791diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
36792index efba702..59b2c0f 100644
36793--- a/drivers/message/i2o/i2o_config.c
36794+++ b/drivers/message/i2o/i2o_config.c
36795@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
36796 struct i2o_message *msg;
36797 unsigned int iop;
36798
36799+ pax_track_stack();
36800+
36801 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
36802 return -EFAULT;
36803
36804diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
36805index 7045c45..c07b170 100644
36806--- a/drivers/message/i2o/i2o_proc.c
36807+++ b/drivers/message/i2o/i2o_proc.c
36808@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
36809 "Array Controller Device"
36810 };
36811
36812-static char *chtostr(u8 * chars, int n)
36813-{
36814- char tmp[256];
36815- tmp[0] = 0;
36816- return strncat(tmp, (char *)chars, n);
36817-}
36818-
36819 static int i2o_report_query_status(struct seq_file *seq, int block_status,
36820 char *group)
36821 {
36822@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
36823
36824 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
36825 seq_printf(seq, "%-#8x", ddm_table.module_id);
36826- seq_printf(seq, "%-29s",
36827- chtostr(ddm_table.module_name_version, 28));
36828+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
36829 seq_printf(seq, "%9d ", ddm_table.data_size);
36830 seq_printf(seq, "%8d", ddm_table.code_size);
36831
36832@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
36833
36834 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
36835 seq_printf(seq, "%-#8x", dst->module_id);
36836- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
36837- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
36838+ seq_printf(seq, "%-.28s", dst->module_name_version);
36839+ seq_printf(seq, "%-.8s", dst->date);
36840 seq_printf(seq, "%8d ", dst->module_size);
36841 seq_printf(seq, "%8d ", dst->mpb_size);
36842 seq_printf(seq, "0x%04x", dst->module_flags);
36843@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
36844 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
36845 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
36846 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
36847- seq_printf(seq, "Vendor info : %s\n",
36848- chtostr((u8 *) (work32 + 2), 16));
36849- seq_printf(seq, "Product info : %s\n",
36850- chtostr((u8 *) (work32 + 6), 16));
36851- seq_printf(seq, "Description : %s\n",
36852- chtostr((u8 *) (work32 + 10), 16));
36853- seq_printf(seq, "Product rev. : %s\n",
36854- chtostr((u8 *) (work32 + 14), 8));
36855+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
36856+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
36857+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
36858+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
36859
36860 seq_printf(seq, "Serial number : ");
36861 print_serial_number(seq, (u8 *) (work32 + 16),
36862@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
36863 }
36864
36865 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
36866- seq_printf(seq, "Module name : %s\n",
36867- chtostr(result.module_name, 24));
36868- seq_printf(seq, "Module revision : %s\n",
36869- chtostr(result.module_rev, 8));
36870+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
36871+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
36872
36873 seq_printf(seq, "Serial number : ");
36874 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
36875@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
36876 return 0;
36877 }
36878
36879- seq_printf(seq, "Device name : %s\n",
36880- chtostr(result.device_name, 64));
36881- seq_printf(seq, "Service name : %s\n",
36882- chtostr(result.service_name, 64));
36883- seq_printf(seq, "Physical name : %s\n",
36884- chtostr(result.physical_location, 64));
36885- seq_printf(seq, "Instance number : %s\n",
36886- chtostr(result.instance_number, 4));
36887+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
36888+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
36889+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
36890+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
36891
36892 return 0;
36893 }
36894diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
36895index 27cf4af..b1205b8 100644
36896--- a/drivers/message/i2o/iop.c
36897+++ b/drivers/message/i2o/iop.c
36898@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
36899
36900 spin_lock_irqsave(&c->context_list_lock, flags);
36901
36902- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
36903- atomic_inc(&c->context_list_counter);
36904+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
36905+ atomic_inc_unchecked(&c->context_list_counter);
36906
36907- entry->context = atomic_read(&c->context_list_counter);
36908+ entry->context = atomic_read_unchecked(&c->context_list_counter);
36909
36910 list_add(&entry->list, &c->context_list);
36911
36912@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
36913
36914 #if BITS_PER_LONG == 64
36915 spin_lock_init(&c->context_list_lock);
36916- atomic_set(&c->context_list_counter, 0);
36917+ atomic_set_unchecked(&c->context_list_counter, 0);
36918 INIT_LIST_HEAD(&c->context_list);
36919 #endif
36920
36921diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
36922index 78e3e85..66c9a0d 100644
36923--- a/drivers/mfd/ab3100-core.c
36924+++ b/drivers/mfd/ab3100-core.c
36925@@ -777,7 +777,7 @@ struct ab_family_id {
36926 char *name;
36927 };
36928
36929-static const struct ab_family_id ids[] __initdata = {
36930+static const struct ab_family_id ids[] __initconst = {
36931 /* AB3100 */
36932 {
36933 .id = 0xc0,
36934diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
36935index 8d8c932..8104515 100644
36936--- a/drivers/mfd/wm8350-i2c.c
36937+++ b/drivers/mfd/wm8350-i2c.c
36938@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
36939 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
36940 int ret;
36941
36942+ pax_track_stack();
36943+
36944 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
36945 return -EINVAL;
36946
36947diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
36948index e4ff50b..4cc3f04 100644
36949--- a/drivers/misc/kgdbts.c
36950+++ b/drivers/misc/kgdbts.c
36951@@ -118,7 +118,7 @@
36952 } while (0)
36953 #define MAX_CONFIG_LEN 40
36954
36955-static struct kgdb_io kgdbts_io_ops;
36956+static const struct kgdb_io kgdbts_io_ops;
36957 static char get_buf[BUFMAX];
36958 static int get_buf_cnt;
36959 static char put_buf[BUFMAX];
36960@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
36961 module_put(THIS_MODULE);
36962 }
36963
36964-static struct kgdb_io kgdbts_io_ops = {
36965+static const struct kgdb_io kgdbts_io_ops = {
36966 .name = "kgdbts",
36967 .read_char = kgdbts_get_char,
36968 .write_char = kgdbts_put_char,
36969diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
36970index 37e7cfc..67cfb76 100644
36971--- a/drivers/misc/sgi-gru/gruhandles.c
36972+++ b/drivers/misc/sgi-gru/gruhandles.c
36973@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
36974
36975 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
36976 {
36977- atomic_long_inc(&mcs_op_statistics[op].count);
36978- atomic_long_add(clks, &mcs_op_statistics[op].total);
36979+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
36980+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
36981 if (mcs_op_statistics[op].max < clks)
36982 mcs_op_statistics[op].max = clks;
36983 }
36984diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
36985index 3f2375c..467c6e6 100644
36986--- a/drivers/misc/sgi-gru/gruprocfs.c
36987+++ b/drivers/misc/sgi-gru/gruprocfs.c
36988@@ -32,9 +32,9 @@
36989
36990 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
36991
36992-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
36993+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
36994 {
36995- unsigned long val = atomic_long_read(v);
36996+ unsigned long val = atomic_long_read_unchecked(v);
36997
36998 if (val)
36999 seq_printf(s, "%16lu %s\n", val, id);
37000@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37001 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37002
37003 for (op = 0; op < mcsop_last; op++) {
37004- count = atomic_long_read(&mcs_op_statistics[op].count);
37005- total = atomic_long_read(&mcs_op_statistics[op].total);
37006+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37007+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37008 max = mcs_op_statistics[op].max;
37009 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37010 count ? total / count : 0, max);
37011diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37012index 46990bc..4a251b5 100644
37013--- a/drivers/misc/sgi-gru/grutables.h
37014+++ b/drivers/misc/sgi-gru/grutables.h
37015@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37016 * GRU statistics.
37017 */
37018 struct gru_stats_s {
37019- atomic_long_t vdata_alloc;
37020- atomic_long_t vdata_free;
37021- atomic_long_t gts_alloc;
37022- atomic_long_t gts_free;
37023- atomic_long_t vdata_double_alloc;
37024- atomic_long_t gts_double_allocate;
37025- atomic_long_t assign_context;
37026- atomic_long_t assign_context_failed;
37027- atomic_long_t free_context;
37028- atomic_long_t load_user_context;
37029- atomic_long_t load_kernel_context;
37030- atomic_long_t lock_kernel_context;
37031- atomic_long_t unlock_kernel_context;
37032- atomic_long_t steal_user_context;
37033- atomic_long_t steal_kernel_context;
37034- atomic_long_t steal_context_failed;
37035- atomic_long_t nopfn;
37036- atomic_long_t break_cow;
37037- atomic_long_t asid_new;
37038- atomic_long_t asid_next;
37039- atomic_long_t asid_wrap;
37040- atomic_long_t asid_reuse;
37041- atomic_long_t intr;
37042- atomic_long_t intr_mm_lock_failed;
37043- atomic_long_t call_os;
37044- atomic_long_t call_os_offnode_reference;
37045- atomic_long_t call_os_check_for_bug;
37046- atomic_long_t call_os_wait_queue;
37047- atomic_long_t user_flush_tlb;
37048- atomic_long_t user_unload_context;
37049- atomic_long_t user_exception;
37050- atomic_long_t set_context_option;
37051- atomic_long_t migrate_check;
37052- atomic_long_t migrated_retarget;
37053- atomic_long_t migrated_unload;
37054- atomic_long_t migrated_unload_delay;
37055- atomic_long_t migrated_nopfn_retarget;
37056- atomic_long_t migrated_nopfn_unload;
37057- atomic_long_t tlb_dropin;
37058- atomic_long_t tlb_dropin_fail_no_asid;
37059- atomic_long_t tlb_dropin_fail_upm;
37060- atomic_long_t tlb_dropin_fail_invalid;
37061- atomic_long_t tlb_dropin_fail_range_active;
37062- atomic_long_t tlb_dropin_fail_idle;
37063- atomic_long_t tlb_dropin_fail_fmm;
37064- atomic_long_t tlb_dropin_fail_no_exception;
37065- atomic_long_t tlb_dropin_fail_no_exception_war;
37066- atomic_long_t tfh_stale_on_fault;
37067- atomic_long_t mmu_invalidate_range;
37068- atomic_long_t mmu_invalidate_page;
37069- atomic_long_t mmu_clear_flush_young;
37070- atomic_long_t flush_tlb;
37071- atomic_long_t flush_tlb_gru;
37072- atomic_long_t flush_tlb_gru_tgh;
37073- atomic_long_t flush_tlb_gru_zero_asid;
37074-
37075- atomic_long_t copy_gpa;
37076-
37077- atomic_long_t mesq_receive;
37078- atomic_long_t mesq_receive_none;
37079- atomic_long_t mesq_send;
37080- atomic_long_t mesq_send_failed;
37081- atomic_long_t mesq_noop;
37082- atomic_long_t mesq_send_unexpected_error;
37083- atomic_long_t mesq_send_lb_overflow;
37084- atomic_long_t mesq_send_qlimit_reached;
37085- atomic_long_t mesq_send_amo_nacked;
37086- atomic_long_t mesq_send_put_nacked;
37087- atomic_long_t mesq_qf_not_full;
37088- atomic_long_t mesq_qf_locked;
37089- atomic_long_t mesq_qf_noop_not_full;
37090- atomic_long_t mesq_qf_switch_head_failed;
37091- atomic_long_t mesq_qf_unexpected_error;
37092- atomic_long_t mesq_noop_unexpected_error;
37093- atomic_long_t mesq_noop_lb_overflow;
37094- atomic_long_t mesq_noop_qlimit_reached;
37095- atomic_long_t mesq_noop_amo_nacked;
37096- atomic_long_t mesq_noop_put_nacked;
37097+ atomic_long_unchecked_t vdata_alloc;
37098+ atomic_long_unchecked_t vdata_free;
37099+ atomic_long_unchecked_t gts_alloc;
37100+ atomic_long_unchecked_t gts_free;
37101+ atomic_long_unchecked_t vdata_double_alloc;
37102+ atomic_long_unchecked_t gts_double_allocate;
37103+ atomic_long_unchecked_t assign_context;
37104+ atomic_long_unchecked_t assign_context_failed;
37105+ atomic_long_unchecked_t free_context;
37106+ atomic_long_unchecked_t load_user_context;
37107+ atomic_long_unchecked_t load_kernel_context;
37108+ atomic_long_unchecked_t lock_kernel_context;
37109+ atomic_long_unchecked_t unlock_kernel_context;
37110+ atomic_long_unchecked_t steal_user_context;
37111+ atomic_long_unchecked_t steal_kernel_context;
37112+ atomic_long_unchecked_t steal_context_failed;
37113+ atomic_long_unchecked_t nopfn;
37114+ atomic_long_unchecked_t break_cow;
37115+ atomic_long_unchecked_t asid_new;
37116+ atomic_long_unchecked_t asid_next;
37117+ atomic_long_unchecked_t asid_wrap;
37118+ atomic_long_unchecked_t asid_reuse;
37119+ atomic_long_unchecked_t intr;
37120+ atomic_long_unchecked_t intr_mm_lock_failed;
37121+ atomic_long_unchecked_t call_os;
37122+ atomic_long_unchecked_t call_os_offnode_reference;
37123+ atomic_long_unchecked_t call_os_check_for_bug;
37124+ atomic_long_unchecked_t call_os_wait_queue;
37125+ atomic_long_unchecked_t user_flush_tlb;
37126+ atomic_long_unchecked_t user_unload_context;
37127+ atomic_long_unchecked_t user_exception;
37128+ atomic_long_unchecked_t set_context_option;
37129+ atomic_long_unchecked_t migrate_check;
37130+ atomic_long_unchecked_t migrated_retarget;
37131+ atomic_long_unchecked_t migrated_unload;
37132+ atomic_long_unchecked_t migrated_unload_delay;
37133+ atomic_long_unchecked_t migrated_nopfn_retarget;
37134+ atomic_long_unchecked_t migrated_nopfn_unload;
37135+ atomic_long_unchecked_t tlb_dropin;
37136+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37137+ atomic_long_unchecked_t tlb_dropin_fail_upm;
37138+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
37139+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
37140+ atomic_long_unchecked_t tlb_dropin_fail_idle;
37141+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
37142+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37143+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37144+ atomic_long_unchecked_t tfh_stale_on_fault;
37145+ atomic_long_unchecked_t mmu_invalidate_range;
37146+ atomic_long_unchecked_t mmu_invalidate_page;
37147+ atomic_long_unchecked_t mmu_clear_flush_young;
37148+ atomic_long_unchecked_t flush_tlb;
37149+ atomic_long_unchecked_t flush_tlb_gru;
37150+ atomic_long_unchecked_t flush_tlb_gru_tgh;
37151+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37152+
37153+ atomic_long_unchecked_t copy_gpa;
37154+
37155+ atomic_long_unchecked_t mesq_receive;
37156+ atomic_long_unchecked_t mesq_receive_none;
37157+ atomic_long_unchecked_t mesq_send;
37158+ atomic_long_unchecked_t mesq_send_failed;
37159+ atomic_long_unchecked_t mesq_noop;
37160+ atomic_long_unchecked_t mesq_send_unexpected_error;
37161+ atomic_long_unchecked_t mesq_send_lb_overflow;
37162+ atomic_long_unchecked_t mesq_send_qlimit_reached;
37163+ atomic_long_unchecked_t mesq_send_amo_nacked;
37164+ atomic_long_unchecked_t mesq_send_put_nacked;
37165+ atomic_long_unchecked_t mesq_qf_not_full;
37166+ atomic_long_unchecked_t mesq_qf_locked;
37167+ atomic_long_unchecked_t mesq_qf_noop_not_full;
37168+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
37169+ atomic_long_unchecked_t mesq_qf_unexpected_error;
37170+ atomic_long_unchecked_t mesq_noop_unexpected_error;
37171+ atomic_long_unchecked_t mesq_noop_lb_overflow;
37172+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
37173+ atomic_long_unchecked_t mesq_noop_amo_nacked;
37174+ atomic_long_unchecked_t mesq_noop_put_nacked;
37175
37176 };
37177
37178@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37179 cchop_deallocate, tghop_invalidate, mcsop_last};
37180
37181 struct mcs_op_statistic {
37182- atomic_long_t count;
37183- atomic_long_t total;
37184+ atomic_long_unchecked_t count;
37185+ atomic_long_unchecked_t total;
37186 unsigned long max;
37187 };
37188
37189@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37190
37191 #define STAT(id) do { \
37192 if (gru_options & OPT_STATS) \
37193- atomic_long_inc(&gru_stats.id); \
37194+ atomic_long_inc_unchecked(&gru_stats.id); \
37195 } while (0)
37196
37197 #ifdef CONFIG_SGI_GRU_DEBUG
37198diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37199index 2275126..12a9dbfb 100644
37200--- a/drivers/misc/sgi-xp/xp.h
37201+++ b/drivers/misc/sgi-xp/xp.h
37202@@ -289,7 +289,7 @@ struct xpc_interface {
37203 xpc_notify_func, void *);
37204 void (*received) (short, int, void *);
37205 enum xp_retval (*partid_to_nasids) (short, void *);
37206-};
37207+} __no_const;
37208
37209 extern struct xpc_interface xpc_interface;
37210
37211diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37212index b94d5f7..7f494c5 100644
37213--- a/drivers/misc/sgi-xp/xpc.h
37214+++ b/drivers/misc/sgi-xp/xpc.h
37215@@ -835,6 +835,7 @@ struct xpc_arch_operations {
37216 void (*received_payload) (struct xpc_channel *, void *);
37217 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37218 };
37219+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37220
37221 /* struct xpc_partition act_state values (for XPC HB) */
37222
37223@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37224 /* found in xpc_main.c */
37225 extern struct device *xpc_part;
37226 extern struct device *xpc_chan;
37227-extern struct xpc_arch_operations xpc_arch_ops;
37228+extern xpc_arch_operations_no_const xpc_arch_ops;
37229 extern int xpc_disengage_timelimit;
37230 extern int xpc_disengage_timedout;
37231 extern int xpc_activate_IRQ_rcvd;
37232diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37233index fd3688a..7e211a4 100644
37234--- a/drivers/misc/sgi-xp/xpc_main.c
37235+++ b/drivers/misc/sgi-xp/xpc_main.c
37236@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37237 .notifier_call = xpc_system_die,
37238 };
37239
37240-struct xpc_arch_operations xpc_arch_ops;
37241+xpc_arch_operations_no_const xpc_arch_ops;
37242
37243 /*
37244 * Timer function to enforce the timelimit on the partition disengage.
37245diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37246index 8b70e03..700bda6 100644
37247--- a/drivers/misc/sgi-xp/xpc_sn2.c
37248+++ b/drivers/misc/sgi-xp/xpc_sn2.c
37249@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37250 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37251 }
37252
37253-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37254+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37255 .setup_partitions = xpc_setup_partitions_sn2,
37256 .teardown_partitions = xpc_teardown_partitions_sn2,
37257 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37258@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37259 int ret;
37260 size_t buf_size;
37261
37262- xpc_arch_ops = xpc_arch_ops_sn2;
37263+ pax_open_kernel();
37264+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37265+ pax_close_kernel();
37266
37267 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37268 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37269diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37270index 8e08d71..7cb8c9b 100644
37271--- a/drivers/misc/sgi-xp/xpc_uv.c
37272+++ b/drivers/misc/sgi-xp/xpc_uv.c
37273@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37274 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37275 }
37276
37277-static struct xpc_arch_operations xpc_arch_ops_uv = {
37278+static const struct xpc_arch_operations xpc_arch_ops_uv = {
37279 .setup_partitions = xpc_setup_partitions_uv,
37280 .teardown_partitions = xpc_teardown_partitions_uv,
37281 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37282@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37283 int
37284 xpc_init_uv(void)
37285 {
37286- xpc_arch_ops = xpc_arch_ops_uv;
37287+ pax_open_kernel();
37288+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37289+ pax_close_kernel();
37290
37291 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37292 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37293diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37294index 6fd20b42..650efe3 100644
37295--- a/drivers/mmc/host/sdhci-pci.c
37296+++ b/drivers/mmc/host/sdhci-pci.c
37297@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37298 .probe = via_probe,
37299 };
37300
37301-static const struct pci_device_id pci_ids[] __devinitdata = {
37302+static const struct pci_device_id pci_ids[] __devinitconst = {
37303 {
37304 .vendor = PCI_VENDOR_ID_RICOH,
37305 .device = PCI_DEVICE_ID_RICOH_R5C822,
37306diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37307index e7563a9..5f90ce5 100644
37308--- a/drivers/mtd/chips/cfi_cmdset_0001.c
37309+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37310@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37311 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37312 unsigned long timeo = jiffies + HZ;
37313
37314+ pax_track_stack();
37315+
37316 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37317 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37318 goto sleep;
37319@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37320 unsigned long initial_adr;
37321 int initial_len = len;
37322
37323+ pax_track_stack();
37324+
37325 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37326 adr += chip->start;
37327 initial_adr = adr;
37328@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37329 int retries = 3;
37330 int ret;
37331
37332+ pax_track_stack();
37333+
37334 adr += chip->start;
37335
37336 retry:
37337diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37338index 0667a67..3ab97ed 100644
37339--- a/drivers/mtd/chips/cfi_cmdset_0020.c
37340+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37341@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37342 unsigned long cmd_addr;
37343 struct cfi_private *cfi = map->fldrv_priv;
37344
37345+ pax_track_stack();
37346+
37347 adr += chip->start;
37348
37349 /* Ensure cmd read/writes are aligned. */
37350@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37351 DECLARE_WAITQUEUE(wait, current);
37352 int wbufsize, z;
37353
37354+ pax_track_stack();
37355+
37356 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37357 if (adr & (map_bankwidth(map)-1))
37358 return -EINVAL;
37359@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37360 DECLARE_WAITQUEUE(wait, current);
37361 int ret = 0;
37362
37363+ pax_track_stack();
37364+
37365 adr += chip->start;
37366
37367 /* Let's determine this according to the interleave only once */
37368@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37369 unsigned long timeo = jiffies + HZ;
37370 DECLARE_WAITQUEUE(wait, current);
37371
37372+ pax_track_stack();
37373+
37374 adr += chip->start;
37375
37376 /* Let's determine this according to the interleave only once */
37377@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37378 unsigned long timeo = jiffies + HZ;
37379 DECLARE_WAITQUEUE(wait, current);
37380
37381+ pax_track_stack();
37382+
37383 adr += chip->start;
37384
37385 /* Let's determine this according to the interleave only once */
37386diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37387index 5bf5f46..c5de373 100644
37388--- a/drivers/mtd/devices/doc2000.c
37389+++ b/drivers/mtd/devices/doc2000.c
37390@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37391
37392 /* The ECC will not be calculated correctly if less than 512 is written */
37393 /* DBB-
37394- if (len != 0x200 && eccbuf)
37395+ if (len != 0x200)
37396 printk(KERN_WARNING
37397 "ECC needs a full sector write (adr: %lx size %lx)\n",
37398 (long) to, (long) len);
37399diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37400index 0990f78..bb4e8a4 100644
37401--- a/drivers/mtd/devices/doc2001.c
37402+++ b/drivers/mtd/devices/doc2001.c
37403@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37404 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37405
37406 /* Don't allow read past end of device */
37407- if (from >= this->totlen)
37408+ if (from >= this->totlen || !len)
37409 return -EINVAL;
37410
37411 /* Don't allow a single read to cross a 512-byte block boundary */
37412diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37413index e56d6b4..f07e6cf 100644
37414--- a/drivers/mtd/ftl.c
37415+++ b/drivers/mtd/ftl.c
37416@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37417 loff_t offset;
37418 uint16_t srcunitswap = cpu_to_le16(srcunit);
37419
37420+ pax_track_stack();
37421+
37422 eun = &part->EUNInfo[srcunit];
37423 xfer = &part->XferInfo[xferunit];
37424 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37425diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37426index 8aca552..146446e 100755
37427--- a/drivers/mtd/inftlcore.c
37428+++ b/drivers/mtd/inftlcore.c
37429@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37430 struct inftl_oob oob;
37431 size_t retlen;
37432
37433+ pax_track_stack();
37434+
37435 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37436 "pending=%d)\n", inftl, thisVUC, pendingblock);
37437
37438diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37439index 32e82ae..ed50953 100644
37440--- a/drivers/mtd/inftlmount.c
37441+++ b/drivers/mtd/inftlmount.c
37442@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37443 struct INFTLPartition *ip;
37444 size_t retlen;
37445
37446+ pax_track_stack();
37447+
37448 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37449
37450 /*
37451diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37452index 79bf40f..fe5f8fd 100644
37453--- a/drivers/mtd/lpddr/qinfo_probe.c
37454+++ b/drivers/mtd/lpddr/qinfo_probe.c
37455@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37456 {
37457 map_word pfow_val[4];
37458
37459+ pax_track_stack();
37460+
37461 /* Check identification string */
37462 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37463 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37464diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37465index 726a1b8..f46b460 100644
37466--- a/drivers/mtd/mtdchar.c
37467+++ b/drivers/mtd/mtdchar.c
37468@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37469 u_long size;
37470 struct mtd_info_user info;
37471
37472+ pax_track_stack();
37473+
37474 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37475
37476 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37477diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37478index 1002e18..26d82d5 100644
37479--- a/drivers/mtd/nftlcore.c
37480+++ b/drivers/mtd/nftlcore.c
37481@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37482 int inplace = 1;
37483 size_t retlen;
37484
37485+ pax_track_stack();
37486+
37487 memset(BlockMap, 0xff, sizeof(BlockMap));
37488 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37489
37490diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37491index 8b22b18..6fada85 100644
37492--- a/drivers/mtd/nftlmount.c
37493+++ b/drivers/mtd/nftlmount.c
37494@@ -23,6 +23,7 @@
37495 #include <asm/errno.h>
37496 #include <linux/delay.h>
37497 #include <linux/slab.h>
37498+#include <linux/sched.h>
37499 #include <linux/mtd/mtd.h>
37500 #include <linux/mtd/nand.h>
37501 #include <linux/mtd/nftl.h>
37502@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37503 struct mtd_info *mtd = nftl->mbd.mtd;
37504 unsigned int i;
37505
37506+ pax_track_stack();
37507+
37508 /* Assume logical EraseSize == physical erasesize for starting the scan.
37509 We'll sort it out later if we find a MediaHeader which says otherwise */
37510 /* Actually, we won't. The new DiskOnChip driver has already scanned
37511diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37512index 14cec04..d775b87 100644
37513--- a/drivers/mtd/ubi/build.c
37514+++ b/drivers/mtd/ubi/build.c
37515@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37516 static int __init bytes_str_to_int(const char *str)
37517 {
37518 char *endp;
37519- unsigned long result;
37520+ unsigned long result, scale = 1;
37521
37522 result = simple_strtoul(str, &endp, 0);
37523 if (str == endp || result >= INT_MAX) {
37524@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37525
37526 switch (*endp) {
37527 case 'G':
37528- result *= 1024;
37529+ scale *= 1024;
37530 case 'M':
37531- result *= 1024;
37532+ scale *= 1024;
37533 case 'K':
37534- result *= 1024;
37535+ scale *= 1024;
37536 if (endp[1] == 'i' && endp[2] == 'B')
37537 endp += 2;
37538 case '\0':
37539@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37540 return -EINVAL;
37541 }
37542
37543- return result;
37544+ if ((intoverflow_t)result*scale >= INT_MAX) {
37545+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37546+ str);
37547+ return -EINVAL;
37548+ }
37549+
37550+ return result*scale;
37551 }
37552
37553 /**
37554diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37555index ab68886..ca405e8 100644
37556--- a/drivers/net/atlx/atl2.c
37557+++ b/drivers/net/atlx/atl2.c
37558@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37559 */
37560
37561 #define ATL2_PARAM(X, desc) \
37562- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37563+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37564 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37565 MODULE_PARM_DESC(X, desc);
37566 #else
37567diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37568index 4874b2b..67f8526 100644
37569--- a/drivers/net/bnx2.c
37570+++ b/drivers/net/bnx2.c
37571@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37572 int rc = 0;
37573 u32 magic, csum;
37574
37575+ pax_track_stack();
37576+
37577 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37578 goto test_nvram_done;
37579
37580diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37581index fd3eb07..8a6978d 100644
37582--- a/drivers/net/cxgb3/l2t.h
37583+++ b/drivers/net/cxgb3/l2t.h
37584@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37585 */
37586 struct l2t_skb_cb {
37587 arp_failure_handler_func arp_failure_handler;
37588-};
37589+} __no_const;
37590
37591 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37592
37593diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37594index 032cfe0..411af379 100644
37595--- a/drivers/net/cxgb3/t3_hw.c
37596+++ b/drivers/net/cxgb3/t3_hw.c
37597@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37598 int i, addr, ret;
37599 struct t3_vpd vpd;
37600
37601+ pax_track_stack();
37602+
37603 /*
37604 * Card information is normally at VPD_BASE but some early cards had
37605 * it at 0.
37606diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37607index d1e0563..b9e129c 100644
37608--- a/drivers/net/e1000e/82571.c
37609+++ b/drivers/net/e1000e/82571.c
37610@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37611 {
37612 struct e1000_hw *hw = &adapter->hw;
37613 struct e1000_mac_info *mac = &hw->mac;
37614- struct e1000_mac_operations *func = &mac->ops;
37615+ e1000_mac_operations_no_const *func = &mac->ops;
37616 u32 swsm = 0;
37617 u32 swsm2 = 0;
37618 bool force_clear_smbi = false;
37619@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37620 temp = er32(ICRXDMTC);
37621 }
37622
37623-static struct e1000_mac_operations e82571_mac_ops = {
37624+static const struct e1000_mac_operations e82571_mac_ops = {
37625 /* .check_mng_mode: mac type dependent */
37626 /* .check_for_link: media type dependent */
37627 .id_led_init = e1000e_id_led_init,
37628@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37629 .setup_led = e1000e_setup_led_generic,
37630 };
37631
37632-static struct e1000_phy_operations e82_phy_ops_igp = {
37633+static const struct e1000_phy_operations e82_phy_ops_igp = {
37634 .acquire_phy = e1000_get_hw_semaphore_82571,
37635 .check_reset_block = e1000e_check_reset_block_generic,
37636 .commit_phy = NULL,
37637@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
37638 .cfg_on_link_up = NULL,
37639 };
37640
37641-static struct e1000_phy_operations e82_phy_ops_m88 = {
37642+static const struct e1000_phy_operations e82_phy_ops_m88 = {
37643 .acquire_phy = e1000_get_hw_semaphore_82571,
37644 .check_reset_block = e1000e_check_reset_block_generic,
37645 .commit_phy = e1000e_phy_sw_reset,
37646@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
37647 .cfg_on_link_up = NULL,
37648 };
37649
37650-static struct e1000_phy_operations e82_phy_ops_bm = {
37651+static const struct e1000_phy_operations e82_phy_ops_bm = {
37652 .acquire_phy = e1000_get_hw_semaphore_82571,
37653 .check_reset_block = e1000e_check_reset_block_generic,
37654 .commit_phy = e1000e_phy_sw_reset,
37655@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
37656 .cfg_on_link_up = NULL,
37657 };
37658
37659-static struct e1000_nvm_operations e82571_nvm_ops = {
37660+static const struct e1000_nvm_operations e82571_nvm_ops = {
37661 .acquire_nvm = e1000_acquire_nvm_82571,
37662 .read_nvm = e1000e_read_nvm_eerd,
37663 .release_nvm = e1000_release_nvm_82571,
37664diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
37665index 47db9bd..fa58ccd 100644
37666--- a/drivers/net/e1000e/e1000.h
37667+++ b/drivers/net/e1000e/e1000.h
37668@@ -375,9 +375,9 @@ struct e1000_info {
37669 u32 pba;
37670 u32 max_hw_frame_size;
37671 s32 (*get_variants)(struct e1000_adapter *);
37672- struct e1000_mac_operations *mac_ops;
37673- struct e1000_phy_operations *phy_ops;
37674- struct e1000_nvm_operations *nvm_ops;
37675+ const struct e1000_mac_operations *mac_ops;
37676+ const struct e1000_phy_operations *phy_ops;
37677+ const struct e1000_nvm_operations *nvm_ops;
37678 };
37679
37680 /* hardware capability, feature, and workaround flags */
37681diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
37682index ae5d736..e9a93a1 100644
37683--- a/drivers/net/e1000e/es2lan.c
37684+++ b/drivers/net/e1000e/es2lan.c
37685@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
37686 {
37687 struct e1000_hw *hw = &adapter->hw;
37688 struct e1000_mac_info *mac = &hw->mac;
37689- struct e1000_mac_operations *func = &mac->ops;
37690+ e1000_mac_operations_no_const *func = &mac->ops;
37691
37692 /* Set media type */
37693 switch (adapter->pdev->device) {
37694@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
37695 temp = er32(ICRXDMTC);
37696 }
37697
37698-static struct e1000_mac_operations es2_mac_ops = {
37699+static const struct e1000_mac_operations es2_mac_ops = {
37700 .id_led_init = e1000e_id_led_init,
37701 .check_mng_mode = e1000e_check_mng_mode_generic,
37702 /* check_for_link dependent on media type */
37703@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
37704 .setup_led = e1000e_setup_led_generic,
37705 };
37706
37707-static struct e1000_phy_operations es2_phy_ops = {
37708+static const struct e1000_phy_operations es2_phy_ops = {
37709 .acquire_phy = e1000_acquire_phy_80003es2lan,
37710 .check_reset_block = e1000e_check_reset_block_generic,
37711 .commit_phy = e1000e_phy_sw_reset,
37712@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
37713 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
37714 };
37715
37716-static struct e1000_nvm_operations es2_nvm_ops = {
37717+static const struct e1000_nvm_operations es2_nvm_ops = {
37718 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
37719 .read_nvm = e1000e_read_nvm_eerd,
37720 .release_nvm = e1000_release_nvm_80003es2lan,
37721diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
37722index 11f3b7c..6381887 100644
37723--- a/drivers/net/e1000e/hw.h
37724+++ b/drivers/net/e1000e/hw.h
37725@@ -753,6 +753,7 @@ struct e1000_mac_operations {
37726 s32 (*setup_physical_interface)(struct e1000_hw *);
37727 s32 (*setup_led)(struct e1000_hw *);
37728 };
37729+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37730
37731 /* Function pointers for the PHY. */
37732 struct e1000_phy_operations {
37733@@ -774,6 +775,7 @@ struct e1000_phy_operations {
37734 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
37735 s32 (*cfg_on_link_up)(struct e1000_hw *);
37736 };
37737+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37738
37739 /* Function pointers for the NVM. */
37740 struct e1000_nvm_operations {
37741@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
37742 s32 (*validate_nvm)(struct e1000_hw *);
37743 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
37744 };
37745+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
37746
37747 struct e1000_mac_info {
37748- struct e1000_mac_operations ops;
37749+ e1000_mac_operations_no_const ops;
37750
37751 u8 addr[6];
37752 u8 perm_addr[6];
37753@@ -823,7 +826,7 @@ struct e1000_mac_info {
37754 };
37755
37756 struct e1000_phy_info {
37757- struct e1000_phy_operations ops;
37758+ e1000_phy_operations_no_const ops;
37759
37760 enum e1000_phy_type type;
37761
37762@@ -857,7 +860,7 @@ struct e1000_phy_info {
37763 };
37764
37765 struct e1000_nvm_info {
37766- struct e1000_nvm_operations ops;
37767+ e1000_nvm_operations_no_const ops;
37768
37769 enum e1000_nvm_type type;
37770 enum e1000_nvm_override override;
37771diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
37772index de39f9a..e28d3e0 100644
37773--- a/drivers/net/e1000e/ich8lan.c
37774+++ b/drivers/net/e1000e/ich8lan.c
37775@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
37776 }
37777 }
37778
37779-static struct e1000_mac_operations ich8_mac_ops = {
37780+static const struct e1000_mac_operations ich8_mac_ops = {
37781 .id_led_init = e1000e_id_led_init,
37782 .check_mng_mode = e1000_check_mng_mode_ich8lan,
37783 .check_for_link = e1000_check_for_copper_link_ich8lan,
37784@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
37785 /* id_led_init dependent on mac type */
37786 };
37787
37788-static struct e1000_phy_operations ich8_phy_ops = {
37789+static const struct e1000_phy_operations ich8_phy_ops = {
37790 .acquire_phy = e1000_acquire_swflag_ich8lan,
37791 .check_reset_block = e1000_check_reset_block_ich8lan,
37792 .commit_phy = NULL,
37793@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
37794 .write_phy_reg = e1000e_write_phy_reg_igp,
37795 };
37796
37797-static struct e1000_nvm_operations ich8_nvm_ops = {
37798+static const struct e1000_nvm_operations ich8_nvm_ops = {
37799 .acquire_nvm = e1000_acquire_nvm_ich8lan,
37800 .read_nvm = e1000_read_nvm_ich8lan,
37801 .release_nvm = e1000_release_nvm_ich8lan,
37802diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
37803index 18d5fbb..542d96d 100644
37804--- a/drivers/net/fealnx.c
37805+++ b/drivers/net/fealnx.c
37806@@ -151,7 +151,7 @@ struct chip_info {
37807 int flags;
37808 };
37809
37810-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
37811+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
37812 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
37813 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
37814 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
37815diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
37816index 0e5b54b..b503f82 100644
37817--- a/drivers/net/hamradio/6pack.c
37818+++ b/drivers/net/hamradio/6pack.c
37819@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
37820 unsigned char buf[512];
37821 int count1;
37822
37823+ pax_track_stack();
37824+
37825 if (!count)
37826 return;
37827
37828diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
37829index 5862282..7cce8cb 100644
37830--- a/drivers/net/ibmveth.c
37831+++ b/drivers/net/ibmveth.c
37832@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
37833 NULL,
37834 };
37835
37836-static struct sysfs_ops veth_pool_ops = {
37837+static const struct sysfs_ops veth_pool_ops = {
37838 .show = veth_pool_show,
37839 .store = veth_pool_store,
37840 };
37841diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
37842index d617f2d..57b5309 100644
37843--- a/drivers/net/igb/e1000_82575.c
37844+++ b/drivers/net/igb/e1000_82575.c
37845@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
37846 wr32(E1000_VT_CTL, vt_ctl);
37847 }
37848
37849-static struct e1000_mac_operations e1000_mac_ops_82575 = {
37850+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
37851 .reset_hw = igb_reset_hw_82575,
37852 .init_hw = igb_init_hw_82575,
37853 .check_for_link = igb_check_for_link_82575,
37854@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
37855 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
37856 };
37857
37858-static struct e1000_phy_operations e1000_phy_ops_82575 = {
37859+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
37860 .acquire = igb_acquire_phy_82575,
37861 .get_cfg_done = igb_get_cfg_done_82575,
37862 .release = igb_release_phy_82575,
37863 };
37864
37865-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
37866+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
37867 .acquire = igb_acquire_nvm_82575,
37868 .read = igb_read_nvm_eerd,
37869 .release = igb_release_nvm_82575,
37870diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
37871index 72081df..d855cf5 100644
37872--- a/drivers/net/igb/e1000_hw.h
37873+++ b/drivers/net/igb/e1000_hw.h
37874@@ -288,6 +288,7 @@ struct e1000_mac_operations {
37875 s32 (*read_mac_addr)(struct e1000_hw *);
37876 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
37877 };
37878+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37879
37880 struct e1000_phy_operations {
37881 s32 (*acquire)(struct e1000_hw *);
37882@@ -303,6 +304,7 @@ struct e1000_phy_operations {
37883 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
37884 s32 (*write_reg)(struct e1000_hw *, u32, u16);
37885 };
37886+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37887
37888 struct e1000_nvm_operations {
37889 s32 (*acquire)(struct e1000_hw *);
37890@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
37891 void (*release)(struct e1000_hw *);
37892 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
37893 };
37894+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
37895
37896 struct e1000_info {
37897 s32 (*get_invariants)(struct e1000_hw *);
37898@@ -321,7 +324,7 @@ struct e1000_info {
37899 extern const struct e1000_info e1000_82575_info;
37900
37901 struct e1000_mac_info {
37902- struct e1000_mac_operations ops;
37903+ e1000_mac_operations_no_const ops;
37904
37905 u8 addr[6];
37906 u8 perm_addr[6];
37907@@ -365,7 +368,7 @@ struct e1000_mac_info {
37908 };
37909
37910 struct e1000_phy_info {
37911- struct e1000_phy_operations ops;
37912+ e1000_phy_operations_no_const ops;
37913
37914 enum e1000_phy_type type;
37915
37916@@ -400,7 +403,7 @@ struct e1000_phy_info {
37917 };
37918
37919 struct e1000_nvm_info {
37920- struct e1000_nvm_operations ops;
37921+ e1000_nvm_operations_no_const ops;
37922
37923 enum e1000_nvm_type type;
37924 enum e1000_nvm_override override;
37925@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
37926 s32 (*check_for_ack)(struct e1000_hw *, u16);
37927 s32 (*check_for_rst)(struct e1000_hw *, u16);
37928 };
37929+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
37930
37931 struct e1000_mbx_stats {
37932 u32 msgs_tx;
37933@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
37934 };
37935
37936 struct e1000_mbx_info {
37937- struct e1000_mbx_operations ops;
37938+ e1000_mbx_operations_no_const ops;
37939 struct e1000_mbx_stats stats;
37940 u32 timeout;
37941 u32 usec_delay;
37942diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
37943index 1e8ce37..549c453 100644
37944--- a/drivers/net/igbvf/vf.h
37945+++ b/drivers/net/igbvf/vf.h
37946@@ -187,9 +187,10 @@ struct e1000_mac_operations {
37947 s32 (*read_mac_addr)(struct e1000_hw *);
37948 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
37949 };
37950+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37951
37952 struct e1000_mac_info {
37953- struct e1000_mac_operations ops;
37954+ e1000_mac_operations_no_const ops;
37955 u8 addr[6];
37956 u8 perm_addr[6];
37957
37958@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
37959 s32 (*check_for_ack)(struct e1000_hw *);
37960 s32 (*check_for_rst)(struct e1000_hw *);
37961 };
37962+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
37963
37964 struct e1000_mbx_stats {
37965 u32 msgs_tx;
37966@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
37967 };
37968
37969 struct e1000_mbx_info {
37970- struct e1000_mbx_operations ops;
37971+ e1000_mbx_operations_no_const ops;
37972 struct e1000_mbx_stats stats;
37973 u32 timeout;
37974 u32 usec_delay;
37975diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
37976index aa7286b..a61394f 100644
37977--- a/drivers/net/iseries_veth.c
37978+++ b/drivers/net/iseries_veth.c
37979@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
37980 NULL
37981 };
37982
37983-static struct sysfs_ops veth_cnx_sysfs_ops = {
37984+static const struct sysfs_ops veth_cnx_sysfs_ops = {
37985 .show = veth_cnx_attribute_show
37986 };
37987
37988@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
37989 NULL
37990 };
37991
37992-static struct sysfs_ops veth_port_sysfs_ops = {
37993+static const struct sysfs_ops veth_port_sysfs_ops = {
37994 .show = veth_port_attribute_show
37995 };
37996
37997diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
37998index 8aa44dc..fa1e797 100644
37999--- a/drivers/net/ixgb/ixgb_main.c
38000+++ b/drivers/net/ixgb/ixgb_main.c
38001@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38002 u32 rctl;
38003 int i;
38004
38005+ pax_track_stack();
38006+
38007 /* Check for Promiscuous and All Multicast modes */
38008
38009 rctl = IXGB_READ_REG(hw, RCTL);
38010diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38011index af35e1d..8781785 100644
38012--- a/drivers/net/ixgb/ixgb_param.c
38013+++ b/drivers/net/ixgb/ixgb_param.c
38014@@ -260,6 +260,9 @@ void __devinit
38015 ixgb_check_options(struct ixgb_adapter *adapter)
38016 {
38017 int bd = adapter->bd_number;
38018+
38019+ pax_track_stack();
38020+
38021 if (bd >= IXGB_MAX_NIC) {
38022 printk(KERN_NOTICE
38023 "Warning: no configuration for board #%i\n", bd);
38024diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38025index b17aa73..ed74540 100644
38026--- a/drivers/net/ixgbe/ixgbe_type.h
38027+++ b/drivers/net/ixgbe/ixgbe_type.h
38028@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38029 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38030 s32 (*update_checksum)(struct ixgbe_hw *);
38031 };
38032+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38033
38034 struct ixgbe_mac_operations {
38035 s32 (*init_hw)(struct ixgbe_hw *);
38036@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38037 /* Flow Control */
38038 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38039 };
38040+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38041
38042 struct ixgbe_phy_operations {
38043 s32 (*identify)(struct ixgbe_hw *);
38044@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38045 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38046 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38047 };
38048+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38049
38050 struct ixgbe_eeprom_info {
38051- struct ixgbe_eeprom_operations ops;
38052+ ixgbe_eeprom_operations_no_const ops;
38053 enum ixgbe_eeprom_type type;
38054 u32 semaphore_delay;
38055 u16 word_size;
38056@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38057 };
38058
38059 struct ixgbe_mac_info {
38060- struct ixgbe_mac_operations ops;
38061+ ixgbe_mac_operations_no_const ops;
38062 enum ixgbe_mac_type type;
38063 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38064 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38065@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38066 };
38067
38068 struct ixgbe_phy_info {
38069- struct ixgbe_phy_operations ops;
38070+ ixgbe_phy_operations_no_const ops;
38071 struct mdio_if_info mdio;
38072 enum ixgbe_phy_type type;
38073 u32 id;
38074diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38075index 291a505..2543756 100644
38076--- a/drivers/net/mlx4/main.c
38077+++ b/drivers/net/mlx4/main.c
38078@@ -38,6 +38,7 @@
38079 #include <linux/errno.h>
38080 #include <linux/pci.h>
38081 #include <linux/dma-mapping.h>
38082+#include <linux/sched.h>
38083
38084 #include <linux/mlx4/device.h>
38085 #include <linux/mlx4/doorbell.h>
38086@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38087 u64 icm_size;
38088 int err;
38089
38090+ pax_track_stack();
38091+
38092 err = mlx4_QUERY_FW(dev);
38093 if (err) {
38094 if (err == -EACCES)
38095diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38096index 2dce134..fa5ce75 100644
38097--- a/drivers/net/niu.c
38098+++ b/drivers/net/niu.c
38099@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38100 int i, num_irqs, err;
38101 u8 first_ldg;
38102
38103+ pax_track_stack();
38104+
38105 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38106 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38107 ldg_num_map[i] = first_ldg + i;
38108diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38109index c1b3f09..97cd8c4 100644
38110--- a/drivers/net/pcnet32.c
38111+++ b/drivers/net/pcnet32.c
38112@@ -79,7 +79,7 @@ static int cards_found;
38113 /*
38114 * VLB I/O addresses
38115 */
38116-static unsigned int pcnet32_portlist[] __initdata =
38117+static unsigned int pcnet32_portlist[] __devinitdata =
38118 { 0x300, 0x320, 0x340, 0x360, 0 };
38119
38120 static int pcnet32_debug = 0;
38121@@ -267,7 +267,7 @@ struct pcnet32_private {
38122 struct sk_buff **rx_skbuff;
38123 dma_addr_t *tx_dma_addr;
38124 dma_addr_t *rx_dma_addr;
38125- struct pcnet32_access a;
38126+ struct pcnet32_access *a;
38127 spinlock_t lock; /* Guard lock */
38128 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38129 unsigned int rx_ring_size; /* current rx ring size */
38130@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38131 u16 val;
38132
38133 netif_wake_queue(dev);
38134- val = lp->a.read_csr(ioaddr, CSR3);
38135+ val = lp->a->read_csr(ioaddr, CSR3);
38136 val &= 0x00ff;
38137- lp->a.write_csr(ioaddr, CSR3, val);
38138+ lp->a->write_csr(ioaddr, CSR3, val);
38139 napi_enable(&lp->napi);
38140 }
38141
38142@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38143 r = mii_link_ok(&lp->mii_if);
38144 } else if (lp->chip_version >= PCNET32_79C970A) {
38145 ulong ioaddr = dev->base_addr; /* card base I/O address */
38146- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38147+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38148 } else { /* can not detect link on really old chips */
38149 r = 1;
38150 }
38151@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38152 pcnet32_netif_stop(dev);
38153
38154 spin_lock_irqsave(&lp->lock, flags);
38155- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38156+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38157
38158 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38159
38160@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38161 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38162 {
38163 struct pcnet32_private *lp = netdev_priv(dev);
38164- struct pcnet32_access *a = &lp->a; /* access to registers */
38165+ struct pcnet32_access *a = lp->a; /* access to registers */
38166 ulong ioaddr = dev->base_addr; /* card base I/O address */
38167 struct sk_buff *skb; /* sk buff */
38168 int x, i; /* counters */
38169@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38170 pcnet32_netif_stop(dev);
38171
38172 spin_lock_irqsave(&lp->lock, flags);
38173- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38174+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38175
38176 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38177
38178 /* Reset the PCNET32 */
38179- lp->a.reset(ioaddr);
38180- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38181+ lp->a->reset(ioaddr);
38182+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38183
38184 /* switch pcnet32 to 32bit mode */
38185- lp->a.write_bcr(ioaddr, 20, 2);
38186+ lp->a->write_bcr(ioaddr, 20, 2);
38187
38188 /* purge & init rings but don't actually restart */
38189 pcnet32_restart(dev, 0x0000);
38190
38191- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38192+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38193
38194 /* Initialize Transmit buffers. */
38195 size = data_len + 15;
38196@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38197
38198 /* set int loopback in CSR15 */
38199 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38200- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38201+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38202
38203 teststatus = cpu_to_le16(0x8000);
38204- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38205+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38206
38207 /* Check status of descriptors */
38208 for (x = 0; x < numbuffs; x++) {
38209@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38210 }
38211 }
38212
38213- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38214+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38215 wmb();
38216 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38217 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38218@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38219 pcnet32_restart(dev, CSR0_NORMAL);
38220 } else {
38221 pcnet32_purge_rx_ring(dev);
38222- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38223+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38224 }
38225 spin_unlock_irqrestore(&lp->lock, flags);
38226
38227@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38228 static void pcnet32_led_blink_callback(struct net_device *dev)
38229 {
38230 struct pcnet32_private *lp = netdev_priv(dev);
38231- struct pcnet32_access *a = &lp->a;
38232+ struct pcnet32_access *a = lp->a;
38233 ulong ioaddr = dev->base_addr;
38234 unsigned long flags;
38235 int i;
38236@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38237 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38238 {
38239 struct pcnet32_private *lp = netdev_priv(dev);
38240- struct pcnet32_access *a = &lp->a;
38241+ struct pcnet32_access *a = lp->a;
38242 ulong ioaddr = dev->base_addr;
38243 unsigned long flags;
38244 int i, regs[4];
38245@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38246 {
38247 int csr5;
38248 struct pcnet32_private *lp = netdev_priv(dev);
38249- struct pcnet32_access *a = &lp->a;
38250+ struct pcnet32_access *a = lp->a;
38251 ulong ioaddr = dev->base_addr;
38252 int ticks;
38253
38254@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38255 spin_lock_irqsave(&lp->lock, flags);
38256 if (pcnet32_tx(dev)) {
38257 /* reset the chip to clear the error condition, then restart */
38258- lp->a.reset(ioaddr);
38259- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38260+ lp->a->reset(ioaddr);
38261+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38262 pcnet32_restart(dev, CSR0_START);
38263 netif_wake_queue(dev);
38264 }
38265@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38266 __napi_complete(napi);
38267
38268 /* clear interrupt masks */
38269- val = lp->a.read_csr(ioaddr, CSR3);
38270+ val = lp->a->read_csr(ioaddr, CSR3);
38271 val &= 0x00ff;
38272- lp->a.write_csr(ioaddr, CSR3, val);
38273+ lp->a->write_csr(ioaddr, CSR3, val);
38274
38275 /* Set interrupt enable. */
38276- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38277+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38278
38279 spin_unlock_irqrestore(&lp->lock, flags);
38280 }
38281@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38282 int i, csr0;
38283 u16 *buff = ptr;
38284 struct pcnet32_private *lp = netdev_priv(dev);
38285- struct pcnet32_access *a = &lp->a;
38286+ struct pcnet32_access *a = lp->a;
38287 ulong ioaddr = dev->base_addr;
38288 unsigned long flags;
38289
38290@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38291 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38292 if (lp->phymask & (1 << j)) {
38293 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38294- lp->a.write_bcr(ioaddr, 33,
38295+ lp->a->write_bcr(ioaddr, 33,
38296 (j << 5) | i);
38297- *buff++ = lp->a.read_bcr(ioaddr, 34);
38298+ *buff++ = lp->a->read_bcr(ioaddr, 34);
38299 }
38300 }
38301 }
38302@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38303 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38304 lp->options |= PCNET32_PORT_FD;
38305
38306- lp->a = *a;
38307+ lp->a = a;
38308
38309 /* prior to register_netdev, dev->name is not yet correct */
38310 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38311@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38312 if (lp->mii) {
38313 /* lp->phycount and lp->phymask are set to 0 by memset above */
38314
38315- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38316+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38317 /* scan for PHYs */
38318 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38319 unsigned short id1, id2;
38320@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38321 "Found PHY %04x:%04x at address %d.\n",
38322 id1, id2, i);
38323 }
38324- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38325+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38326 if (lp->phycount > 1) {
38327 lp->options |= PCNET32_PORT_MII;
38328 }
38329@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38330 }
38331
38332 /* Reset the PCNET32 */
38333- lp->a.reset(ioaddr);
38334+ lp->a->reset(ioaddr);
38335
38336 /* switch pcnet32 to 32bit mode */
38337- lp->a.write_bcr(ioaddr, 20, 2);
38338+ lp->a->write_bcr(ioaddr, 20, 2);
38339
38340 if (netif_msg_ifup(lp))
38341 printk(KERN_DEBUG
38342@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38343 (u32) (lp->init_dma_addr));
38344
38345 /* set/reset autoselect bit */
38346- val = lp->a.read_bcr(ioaddr, 2) & ~2;
38347+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
38348 if (lp->options & PCNET32_PORT_ASEL)
38349 val |= 2;
38350- lp->a.write_bcr(ioaddr, 2, val);
38351+ lp->a->write_bcr(ioaddr, 2, val);
38352
38353 /* handle full duplex setting */
38354 if (lp->mii_if.full_duplex) {
38355- val = lp->a.read_bcr(ioaddr, 9) & ~3;
38356+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
38357 if (lp->options & PCNET32_PORT_FD) {
38358 val |= 1;
38359 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38360@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38361 if (lp->chip_version == 0x2627)
38362 val |= 3;
38363 }
38364- lp->a.write_bcr(ioaddr, 9, val);
38365+ lp->a->write_bcr(ioaddr, 9, val);
38366 }
38367
38368 /* set/reset GPSI bit in test register */
38369- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38370+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38371 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38372 val |= 0x10;
38373- lp->a.write_csr(ioaddr, 124, val);
38374+ lp->a->write_csr(ioaddr, 124, val);
38375
38376 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38377 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38378@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38379 * duplex, and/or enable auto negotiation, and clear DANAS
38380 */
38381 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38382- lp->a.write_bcr(ioaddr, 32,
38383- lp->a.read_bcr(ioaddr, 32) | 0x0080);
38384+ lp->a->write_bcr(ioaddr, 32,
38385+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
38386 /* disable Auto Negotiation, set 10Mpbs, HD */
38387- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38388+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38389 if (lp->options & PCNET32_PORT_FD)
38390 val |= 0x10;
38391 if (lp->options & PCNET32_PORT_100)
38392 val |= 0x08;
38393- lp->a.write_bcr(ioaddr, 32, val);
38394+ lp->a->write_bcr(ioaddr, 32, val);
38395 } else {
38396 if (lp->options & PCNET32_PORT_ASEL) {
38397- lp->a.write_bcr(ioaddr, 32,
38398- lp->a.read_bcr(ioaddr,
38399+ lp->a->write_bcr(ioaddr, 32,
38400+ lp->a->read_bcr(ioaddr,
38401 32) | 0x0080);
38402 /* enable auto negotiate, setup, disable fd */
38403- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38404+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38405 val |= 0x20;
38406- lp->a.write_bcr(ioaddr, 32, val);
38407+ lp->a->write_bcr(ioaddr, 32, val);
38408 }
38409 }
38410 } else {
38411@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38412 * There is really no good other way to handle multiple PHYs
38413 * other than turning off all automatics
38414 */
38415- val = lp->a.read_bcr(ioaddr, 2);
38416- lp->a.write_bcr(ioaddr, 2, val & ~2);
38417- val = lp->a.read_bcr(ioaddr, 32);
38418- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38419+ val = lp->a->read_bcr(ioaddr, 2);
38420+ lp->a->write_bcr(ioaddr, 2, val & ~2);
38421+ val = lp->a->read_bcr(ioaddr, 32);
38422+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38423
38424 if (!(lp->options & PCNET32_PORT_ASEL)) {
38425 /* setup ecmd */
38426@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38427 ecmd.speed =
38428 lp->
38429 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38430- bcr9 = lp->a.read_bcr(ioaddr, 9);
38431+ bcr9 = lp->a->read_bcr(ioaddr, 9);
38432
38433 if (lp->options & PCNET32_PORT_FD) {
38434 ecmd.duplex = DUPLEX_FULL;
38435@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38436 ecmd.duplex = DUPLEX_HALF;
38437 bcr9 |= ~(1 << 0);
38438 }
38439- lp->a.write_bcr(ioaddr, 9, bcr9);
38440+ lp->a->write_bcr(ioaddr, 9, bcr9);
38441 }
38442
38443 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38444@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38445
38446 #ifdef DO_DXSUFLO
38447 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38448- val = lp->a.read_csr(ioaddr, CSR3);
38449+ val = lp->a->read_csr(ioaddr, CSR3);
38450 val |= 0x40;
38451- lp->a.write_csr(ioaddr, CSR3, val);
38452+ lp->a->write_csr(ioaddr, CSR3, val);
38453 }
38454 #endif
38455
38456@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38457 napi_enable(&lp->napi);
38458
38459 /* Re-initialize the PCNET32, and start it when done. */
38460- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38461- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38462+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38463+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38464
38465- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38466- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38467+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38468+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38469
38470 netif_start_queue(dev);
38471
38472@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38473
38474 i = 0;
38475 while (i++ < 100)
38476- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38477+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38478 break;
38479 /*
38480 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38481 * reports that doing so triggers a bug in the '974.
38482 */
38483- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38484+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38485
38486 if (netif_msg_ifup(lp))
38487 printk(KERN_DEBUG
38488 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38489 dev->name, i,
38490 (u32) (lp->init_dma_addr),
38491- lp->a.read_csr(ioaddr, CSR0));
38492+ lp->a->read_csr(ioaddr, CSR0));
38493
38494 spin_unlock_irqrestore(&lp->lock, flags);
38495
38496@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38497 * Switch back to 16bit mode to avoid problems with dumb
38498 * DOS packet driver after a warm reboot
38499 */
38500- lp->a.write_bcr(ioaddr, 20, 4);
38501+ lp->a->write_bcr(ioaddr, 20, 4);
38502
38503 err_free_irq:
38504 spin_unlock_irqrestore(&lp->lock, flags);
38505@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38506
38507 /* wait for stop */
38508 for (i = 0; i < 100; i++)
38509- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38510+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38511 break;
38512
38513 if (i >= 100 && netif_msg_drv(lp))
38514@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38515 return;
38516
38517 /* ReInit Ring */
38518- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38519+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38520 i = 0;
38521 while (i++ < 1000)
38522- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38523+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38524 break;
38525
38526- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38527+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38528 }
38529
38530 static void pcnet32_tx_timeout(struct net_device *dev)
38531@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38532 if (pcnet32_debug & NETIF_MSG_DRV)
38533 printk(KERN_ERR
38534 "%s: transmit timed out, status %4.4x, resetting.\n",
38535- dev->name, lp->a.read_csr(ioaddr, CSR0));
38536- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38537+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38538+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38539 dev->stats.tx_errors++;
38540 if (netif_msg_tx_err(lp)) {
38541 int i;
38542@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38543 if (netif_msg_tx_queued(lp)) {
38544 printk(KERN_DEBUG
38545 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38546- dev->name, lp->a.read_csr(ioaddr, CSR0));
38547+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38548 }
38549
38550 /* Default status -- will not enable Successful-TxDone
38551@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38552 dev->stats.tx_bytes += skb->len;
38553
38554 /* Trigger an immediate send poll. */
38555- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38556+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38557
38558 dev->trans_start = jiffies;
38559
38560@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38561
38562 spin_lock(&lp->lock);
38563
38564- csr0 = lp->a.read_csr(ioaddr, CSR0);
38565+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38566 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38567 if (csr0 == 0xffff) {
38568 break; /* PCMCIA remove happened */
38569 }
38570 /* Acknowledge all of the current interrupt sources ASAP. */
38571- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38572+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38573
38574 if (netif_msg_intr(lp))
38575 printk(KERN_DEBUG
38576 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38577- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38578+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38579
38580 /* Log misc errors. */
38581 if (csr0 & 0x4000)
38582@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38583 if (napi_schedule_prep(&lp->napi)) {
38584 u16 val;
38585 /* set interrupt masks */
38586- val = lp->a.read_csr(ioaddr, CSR3);
38587+ val = lp->a->read_csr(ioaddr, CSR3);
38588 val |= 0x5f00;
38589- lp->a.write_csr(ioaddr, CSR3, val);
38590+ lp->a->write_csr(ioaddr, CSR3, val);
38591
38592 __napi_schedule(&lp->napi);
38593 break;
38594 }
38595- csr0 = lp->a.read_csr(ioaddr, CSR0);
38596+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38597 }
38598
38599 if (netif_msg_intr(lp))
38600 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38601- dev->name, lp->a.read_csr(ioaddr, CSR0));
38602+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38603
38604 spin_unlock(&lp->lock);
38605
38606@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38607
38608 spin_lock_irqsave(&lp->lock, flags);
38609
38610- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38611+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38612
38613 if (netif_msg_ifdown(lp))
38614 printk(KERN_DEBUG
38615 "%s: Shutting down ethercard, status was %2.2x.\n",
38616- dev->name, lp->a.read_csr(ioaddr, CSR0));
38617+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38618
38619 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38620- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38621+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38622
38623 /*
38624 * Switch back to 16bit mode to avoid problems with dumb
38625 * DOS packet driver after a warm reboot
38626 */
38627- lp->a.write_bcr(ioaddr, 20, 4);
38628+ lp->a->write_bcr(ioaddr, 20, 4);
38629
38630 spin_unlock_irqrestore(&lp->lock, flags);
38631
38632@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
38633 unsigned long flags;
38634
38635 spin_lock_irqsave(&lp->lock, flags);
38636- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38637+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38638 spin_unlock_irqrestore(&lp->lock, flags);
38639
38640 return &dev->stats;
38641@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
38642 if (dev->flags & IFF_ALLMULTI) {
38643 ib->filter[0] = cpu_to_le32(~0U);
38644 ib->filter[1] = cpu_to_le32(~0U);
38645- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38646- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38647- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38648- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38649+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38650+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38651+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38652+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38653 return;
38654 }
38655 /* clear the multicast filter */
38656@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
38657 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
38658 }
38659 for (i = 0; i < 4; i++)
38660- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
38661+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
38662 le16_to_cpu(mcast_table[i]));
38663 return;
38664 }
38665@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38666
38667 spin_lock_irqsave(&lp->lock, flags);
38668 suspended = pcnet32_suspend(dev, &flags, 0);
38669- csr15 = lp->a.read_csr(ioaddr, CSR15);
38670+ csr15 = lp->a->read_csr(ioaddr, CSR15);
38671 if (dev->flags & IFF_PROMISC) {
38672 /* Log any net taps. */
38673 if (netif_msg_hw(lp))
38674@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38675 lp->init_block->mode =
38676 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
38677 7);
38678- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
38679+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
38680 } else {
38681 lp->init_block->mode =
38682 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
38683- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38684+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38685 pcnet32_load_multicast(dev);
38686 }
38687
38688 if (suspended) {
38689 int csr5;
38690 /* clear SUSPEND (SPND) - CSR5 bit 0 */
38691- csr5 = lp->a.read_csr(ioaddr, CSR5);
38692- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38693+ csr5 = lp->a->read_csr(ioaddr, CSR5);
38694+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38695 } else {
38696- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38697+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38698 pcnet32_restart(dev, CSR0_NORMAL);
38699 netif_wake_queue(dev);
38700 }
38701@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
38702 if (!lp->mii)
38703 return 0;
38704
38705- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38706- val_out = lp->a.read_bcr(ioaddr, 34);
38707+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38708+ val_out = lp->a->read_bcr(ioaddr, 34);
38709
38710 return val_out;
38711 }
38712@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
38713 if (!lp->mii)
38714 return;
38715
38716- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38717- lp->a.write_bcr(ioaddr, 34, val);
38718+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38719+ lp->a->write_bcr(ioaddr, 34, val);
38720 }
38721
38722 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38723@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38724 curr_link = mii_link_ok(&lp->mii_if);
38725 } else {
38726 ulong ioaddr = dev->base_addr; /* card base I/O address */
38727- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38728+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38729 }
38730 if (!curr_link) {
38731 if (prev_link || verbose) {
38732@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38733 (ecmd.duplex ==
38734 DUPLEX_FULL) ? "full" : "half");
38735 }
38736- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
38737+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
38738 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
38739 if (lp->mii_if.full_duplex)
38740 bcr9 |= (1 << 0);
38741 else
38742 bcr9 &= ~(1 << 0);
38743- lp->a.write_bcr(dev->base_addr, 9, bcr9);
38744+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
38745 }
38746 } else {
38747 if (netif_msg_link(lp))
38748diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
38749index 7cc9898..6eb50d3 100644
38750--- a/drivers/net/sis190.c
38751+++ b/drivers/net/sis190.c
38752@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
38753 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
38754 struct net_device *dev)
38755 {
38756- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
38757+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
38758 struct sis190_private *tp = netdev_priv(dev);
38759 struct pci_dev *isa_bridge;
38760 u8 reg, tmp8;
38761diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
38762index e13685a..60c948c 100644
38763--- a/drivers/net/sundance.c
38764+++ b/drivers/net/sundance.c
38765@@ -225,7 +225,7 @@ enum {
38766 struct pci_id_info {
38767 const char *name;
38768 };
38769-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
38770+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
38771 {"D-Link DFE-550TX FAST Ethernet Adapter"},
38772 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
38773 {"D-Link DFE-580TX 4 port Server Adapter"},
38774diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
38775index 529f55a..cccaa18 100644
38776--- a/drivers/net/tg3.h
38777+++ b/drivers/net/tg3.h
38778@@ -95,6 +95,7 @@
38779 #define CHIPREV_ID_5750_A0 0x4000
38780 #define CHIPREV_ID_5750_A1 0x4001
38781 #define CHIPREV_ID_5750_A3 0x4003
38782+#define CHIPREV_ID_5750_C1 0x4201
38783 #define CHIPREV_ID_5750_C2 0x4202
38784 #define CHIPREV_ID_5752_A0_HW 0x5000
38785 #define CHIPREV_ID_5752_A0 0x6000
38786diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
38787index b9db1b5..720f9ce 100644
38788--- a/drivers/net/tokenring/abyss.c
38789+++ b/drivers/net/tokenring/abyss.c
38790@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
38791
38792 static int __init abyss_init (void)
38793 {
38794- abyss_netdev_ops = tms380tr_netdev_ops;
38795+ pax_open_kernel();
38796+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38797
38798- abyss_netdev_ops.ndo_open = abyss_open;
38799- abyss_netdev_ops.ndo_stop = abyss_close;
38800+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
38801+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
38802+ pax_close_kernel();
38803
38804 return pci_register_driver(&abyss_driver);
38805 }
38806diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
38807index 456f8bf..373e56d 100644
38808--- a/drivers/net/tokenring/madgemc.c
38809+++ b/drivers/net/tokenring/madgemc.c
38810@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
38811
38812 static int __init madgemc_init (void)
38813 {
38814- madgemc_netdev_ops = tms380tr_netdev_ops;
38815- madgemc_netdev_ops.ndo_open = madgemc_open;
38816- madgemc_netdev_ops.ndo_stop = madgemc_close;
38817+ pax_open_kernel();
38818+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38819+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
38820+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
38821+ pax_close_kernel();
38822
38823 return mca_register_driver (&madgemc_driver);
38824 }
38825diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
38826index 16e8783..925bd49 100644
38827--- a/drivers/net/tokenring/proteon.c
38828+++ b/drivers/net/tokenring/proteon.c
38829@@ -353,9 +353,11 @@ static int __init proteon_init(void)
38830 struct platform_device *pdev;
38831 int i, num = 0, err = 0;
38832
38833- proteon_netdev_ops = tms380tr_netdev_ops;
38834- proteon_netdev_ops.ndo_open = proteon_open;
38835- proteon_netdev_ops.ndo_stop = tms380tr_close;
38836+ pax_open_kernel();
38837+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38838+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
38839+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
38840+ pax_close_kernel();
38841
38842 err = platform_driver_register(&proteon_driver);
38843 if (err)
38844diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
38845index 46db5c5..37c1536 100644
38846--- a/drivers/net/tokenring/skisa.c
38847+++ b/drivers/net/tokenring/skisa.c
38848@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
38849 struct platform_device *pdev;
38850 int i, num = 0, err = 0;
38851
38852- sk_isa_netdev_ops = tms380tr_netdev_ops;
38853- sk_isa_netdev_ops.ndo_open = sk_isa_open;
38854- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
38855+ pax_open_kernel();
38856+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38857+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
38858+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
38859+ pax_close_kernel();
38860
38861 err = platform_driver_register(&sk_isa_driver);
38862 if (err)
38863diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
38864index 74e5ba4..5cf6bc9 100644
38865--- a/drivers/net/tulip/de2104x.c
38866+++ b/drivers/net/tulip/de2104x.c
38867@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
38868 struct de_srom_info_leaf *il;
38869 void *bufp;
38870
38871+ pax_track_stack();
38872+
38873 /* download entire eeprom */
38874 for (i = 0; i < DE_EEPROM_WORDS; i++)
38875 ((__le16 *)ee_data)[i] =
38876diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
38877index a8349b7..90f9dfe 100644
38878--- a/drivers/net/tulip/de4x5.c
38879+++ b/drivers/net/tulip/de4x5.c
38880@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38881 for (i=0; i<ETH_ALEN; i++) {
38882 tmp.addr[i] = dev->dev_addr[i];
38883 }
38884- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38885+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38886 break;
38887
38888 case DE4X5_SET_HWADDR: /* Set the hardware address */
38889@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38890 spin_lock_irqsave(&lp->lock, flags);
38891 memcpy(&statbuf, &lp->pktStats, ioc->len);
38892 spin_unlock_irqrestore(&lp->lock, flags);
38893- if (copy_to_user(ioc->data, &statbuf, ioc->len))
38894+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
38895 return -EFAULT;
38896 break;
38897 }
38898diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
38899index 391acd3..56d11cd 100644
38900--- a/drivers/net/tulip/eeprom.c
38901+++ b/drivers/net/tulip/eeprom.c
38902@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
38903 {NULL}};
38904
38905
38906-static const char *block_name[] __devinitdata = {
38907+static const char *block_name[] __devinitconst = {
38908 "21140 non-MII",
38909 "21140 MII PHY",
38910 "21142 Serial PHY",
38911diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
38912index b38d3b7..b1cff23 100644
38913--- a/drivers/net/tulip/winbond-840.c
38914+++ b/drivers/net/tulip/winbond-840.c
38915@@ -235,7 +235,7 @@ struct pci_id_info {
38916 int drv_flags; /* Driver use, intended as capability flags. */
38917 };
38918
38919-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
38920+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
38921 { /* Sometime a Level-One switch card. */
38922 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
38923 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
38924diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
38925index f450bc9..2b747c8 100644
38926--- a/drivers/net/usb/hso.c
38927+++ b/drivers/net/usb/hso.c
38928@@ -71,7 +71,7 @@
38929 #include <asm/byteorder.h>
38930 #include <linux/serial_core.h>
38931 #include <linux/serial.h>
38932-
38933+#include <asm/local.h>
38934
38935 #define DRIVER_VERSION "1.2"
38936 #define MOD_AUTHOR "Option Wireless"
38937@@ -258,7 +258,7 @@ struct hso_serial {
38938
38939 /* from usb_serial_port */
38940 struct tty_struct *tty;
38941- int open_count;
38942+ local_t open_count;
38943 spinlock_t serial_lock;
38944
38945 int (*write_data) (struct hso_serial *serial);
38946@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
38947 struct urb *urb;
38948
38949 urb = serial->rx_urb[0];
38950- if (serial->open_count > 0) {
38951+ if (local_read(&serial->open_count) > 0) {
38952 count = put_rxbuf_data(urb, serial);
38953 if (count == -1)
38954 return;
38955@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
38956 DUMP1(urb->transfer_buffer, urb->actual_length);
38957
38958 /* Anyone listening? */
38959- if (serial->open_count == 0)
38960+ if (local_read(&serial->open_count) == 0)
38961 return;
38962
38963 if (status == 0) {
38964@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
38965 spin_unlock_irq(&serial->serial_lock);
38966
38967 /* check for port already opened, if not set the termios */
38968- serial->open_count++;
38969- if (serial->open_count == 1) {
38970+ if (local_inc_return(&serial->open_count) == 1) {
38971 tty->low_latency = 1;
38972 serial->rx_state = RX_IDLE;
38973 /* Force default termio settings */
38974@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
38975 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
38976 if (result) {
38977 hso_stop_serial_device(serial->parent);
38978- serial->open_count--;
38979+ local_dec(&serial->open_count);
38980 kref_put(&serial->parent->ref, hso_serial_ref_free);
38981 }
38982 } else {
38983@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
38984
38985 /* reset the rts and dtr */
38986 /* do the actual close */
38987- serial->open_count--;
38988+ local_dec(&serial->open_count);
38989
38990- if (serial->open_count <= 0) {
38991- serial->open_count = 0;
38992+ if (local_read(&serial->open_count) <= 0) {
38993+ local_set(&serial->open_count, 0);
38994 spin_lock_irq(&serial->serial_lock);
38995 if (serial->tty == tty) {
38996 serial->tty->driver_data = NULL;
38997@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
38998
38999 /* the actual setup */
39000 spin_lock_irqsave(&serial->serial_lock, flags);
39001- if (serial->open_count)
39002+ if (local_read(&serial->open_count))
39003 _hso_serial_set_termios(tty, old);
39004 else
39005 tty->termios = old;
39006@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39007 /* Start all serial ports */
39008 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39009 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39010- if (dev2ser(serial_table[i])->open_count) {
39011+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
39012 result =
39013 hso_start_serial_device(serial_table[i], GFP_NOIO);
39014 hso_kick_transmit(dev2ser(serial_table[i]));
39015diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39016index 3e94f0c..ffdd926 100644
39017--- a/drivers/net/vxge/vxge-config.h
39018+++ b/drivers/net/vxge/vxge-config.h
39019@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39020 void (*link_down)(struct __vxge_hw_device *devh);
39021 void (*crit_err)(struct __vxge_hw_device *devh,
39022 enum vxge_hw_event type, u64 ext_data);
39023-};
39024+} __no_const;
39025
39026 /*
39027 * struct __vxge_hw_blockpool_entry - Block private data structure
39028diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39029index 068d7a9..35293de 100644
39030--- a/drivers/net/vxge/vxge-main.c
39031+++ b/drivers/net/vxge/vxge-main.c
39032@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39033 struct sk_buff *completed[NR_SKB_COMPLETED];
39034 int more;
39035
39036+ pax_track_stack();
39037+
39038 do {
39039 more = 0;
39040 skb_ptr = completed;
39041@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39042 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39043 int index;
39044
39045+ pax_track_stack();
39046+
39047 /*
39048 * Filling
39049 * - itable with bucket numbers
39050diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39051index 461742b..81be42e 100644
39052--- a/drivers/net/vxge/vxge-traffic.h
39053+++ b/drivers/net/vxge/vxge-traffic.h
39054@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39055 struct vxge_hw_mempool_dma *dma_object,
39056 u32 index,
39057 u32 is_last);
39058-};
39059+} __no_const;
39060
39061 void
39062 __vxge_hw_mempool_destroy(
39063diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39064index cd8cb95..4153b79 100644
39065--- a/drivers/net/wan/cycx_x25.c
39066+++ b/drivers/net/wan/cycx_x25.c
39067@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39068 unsigned char hex[1024],
39069 * phex = hex;
39070
39071+ pax_track_stack();
39072+
39073 if (len >= (sizeof(hex) / 2))
39074 len = (sizeof(hex) / 2) - 1;
39075
39076diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39077index aa9248f..a4e3c3b 100644
39078--- a/drivers/net/wan/hdlc_x25.c
39079+++ b/drivers/net/wan/hdlc_x25.c
39080@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39081
39082 static int x25_open(struct net_device *dev)
39083 {
39084- struct lapb_register_struct cb;
39085+ static struct lapb_register_struct cb = {
39086+ .connect_confirmation = x25_connected,
39087+ .connect_indication = x25_connected,
39088+ .disconnect_confirmation = x25_disconnected,
39089+ .disconnect_indication = x25_disconnected,
39090+ .data_indication = x25_data_indication,
39091+ .data_transmit = x25_data_transmit
39092+ };
39093 int result;
39094
39095- cb.connect_confirmation = x25_connected;
39096- cb.connect_indication = x25_connected;
39097- cb.disconnect_confirmation = x25_disconnected;
39098- cb.disconnect_indication = x25_disconnected;
39099- cb.data_indication = x25_data_indication;
39100- cb.data_transmit = x25_data_transmit;
39101-
39102 result = lapb_register(dev, &cb);
39103 if (result != LAPB_OK)
39104 return result;
39105diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39106index 5ad287c..783b020 100644
39107--- a/drivers/net/wimax/i2400m/usb-fw.c
39108+++ b/drivers/net/wimax/i2400m/usb-fw.c
39109@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39110 int do_autopm = 1;
39111 DECLARE_COMPLETION_ONSTACK(notif_completion);
39112
39113+ pax_track_stack();
39114+
39115 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39116 i2400m, ack, ack_size);
39117 BUG_ON(_ack == i2400m->bm_ack_buf);
39118diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39119index 6c26840..62c97c3 100644
39120--- a/drivers/net/wireless/airo.c
39121+++ b/drivers/net/wireless/airo.c
39122@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39123 BSSListElement * loop_net;
39124 BSSListElement * tmp_net;
39125
39126+ pax_track_stack();
39127+
39128 /* Blow away current list of scan results */
39129 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39130 list_move_tail (&loop_net->list, &ai->network_free_list);
39131@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39132 WepKeyRid wkr;
39133 int rc;
39134
39135+ pax_track_stack();
39136+
39137 memset( &mySsid, 0, sizeof( mySsid ) );
39138 kfree (ai->flash);
39139 ai->flash = NULL;
39140@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39141 __le32 *vals = stats.vals;
39142 int len;
39143
39144+ pax_track_stack();
39145+
39146 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39147 return -ENOMEM;
39148 data = (struct proc_data *)file->private_data;
39149@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39150 /* If doLoseSync is not 1, we won't do a Lose Sync */
39151 int doLoseSync = -1;
39152
39153+ pax_track_stack();
39154+
39155 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39156 return -ENOMEM;
39157 data = (struct proc_data *)file->private_data;
39158@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39159 int i;
39160 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39161
39162+ pax_track_stack();
39163+
39164 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39165 if (!qual)
39166 return -ENOMEM;
39167@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39168 CapabilityRid cap_rid;
39169 __le32 *vals = stats_rid.vals;
39170
39171+ pax_track_stack();
39172+
39173 /* Get stats out of the card */
39174 clear_bit(JOB_WSTATS, &local->jobs);
39175 if (local->power.event) {
39176diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39177index 747508c..82e965d 100644
39178--- a/drivers/net/wireless/ath/ath5k/debug.c
39179+++ b/drivers/net/wireless/ath/ath5k/debug.c
39180@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39181 unsigned int v;
39182 u64 tsf;
39183
39184+ pax_track_stack();
39185+
39186 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39187 len += snprintf(buf+len, sizeof(buf)-len,
39188 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39189@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39190 unsigned int len = 0;
39191 unsigned int i;
39192
39193+ pax_track_stack();
39194+
39195 len += snprintf(buf+len, sizeof(buf)-len,
39196 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39197
39198diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39199index 2be4c22..593b1eb 100644
39200--- a/drivers/net/wireless/ath/ath9k/debug.c
39201+++ b/drivers/net/wireless/ath/ath9k/debug.c
39202@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39203 char buf[512];
39204 unsigned int len = 0;
39205
39206+ pax_track_stack();
39207+
39208 len += snprintf(buf + len, sizeof(buf) - len,
39209 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39210 len += snprintf(buf + len, sizeof(buf) - len,
39211@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39212 int i;
39213 u8 addr[ETH_ALEN];
39214
39215+ pax_track_stack();
39216+
39217 len += snprintf(buf + len, sizeof(buf) - len,
39218 "primary: %s (%s chan=%d ht=%d)\n",
39219 wiphy_name(sc->pri_wiphy->hw->wiphy),
39220diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39221index 80b19a4..dab3a45 100644
39222--- a/drivers/net/wireless/b43/debugfs.c
39223+++ b/drivers/net/wireless/b43/debugfs.c
39224@@ -43,7 +43,7 @@ static struct dentry *rootdir;
39225 struct b43_debugfs_fops {
39226 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39227 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39228- struct file_operations fops;
39229+ const struct file_operations fops;
39230 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39231 size_t file_struct_offset;
39232 };
39233diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39234index 1f85ac5..c99b4b4 100644
39235--- a/drivers/net/wireless/b43legacy/debugfs.c
39236+++ b/drivers/net/wireless/b43legacy/debugfs.c
39237@@ -44,7 +44,7 @@ static struct dentry *rootdir;
39238 struct b43legacy_debugfs_fops {
39239 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39240 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39241- struct file_operations fops;
39242+ const struct file_operations fops;
39243 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39244 size_t file_struct_offset;
39245 /* Take wl->irq_lock before calling read/write? */
39246diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39247index 43102bf..3b569c3 100644
39248--- a/drivers/net/wireless/ipw2x00/ipw2100.c
39249+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39250@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39251 int err;
39252 DECLARE_SSID_BUF(ssid);
39253
39254+ pax_track_stack();
39255+
39256 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39257
39258 if (ssid_len)
39259@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39260 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39261 int err;
39262
39263+ pax_track_stack();
39264+
39265 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39266 idx, keylen, len);
39267
39268diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39269index 282b1f7..169f0cf 100644
39270--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39271+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39272@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39273 unsigned long flags;
39274 DECLARE_SSID_BUF(ssid);
39275
39276+ pax_track_stack();
39277+
39278 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39279 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39280 print_ssid(ssid, info_element->data, info_element->len),
39281diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39282index 950267a..80d5fd2 100644
39283--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39284+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39285@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39286 },
39287 };
39288
39289-static struct iwl_ops iwl1000_ops = {
39290+static const struct iwl_ops iwl1000_ops = {
39291 .ucode = &iwl5000_ucode,
39292 .lib = &iwl1000_lib,
39293 .hcmd = &iwl5000_hcmd,
39294diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39295index 56bfcc3..b348020 100644
39296--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39297+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39298@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39299 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39300 };
39301
39302-static struct iwl_ops iwl3945_ops = {
39303+static const struct iwl_ops iwl3945_ops = {
39304 .ucode = &iwl3945_ucode,
39305 .lib = &iwl3945_lib,
39306 .hcmd = &iwl3945_hcmd,
39307diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39308index 585b8d4..e142963 100644
39309--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39310+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39311@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39312 },
39313 };
39314
39315-static struct iwl_ops iwl4965_ops = {
39316+static const struct iwl_ops iwl4965_ops = {
39317 .ucode = &iwl4965_ucode,
39318 .lib = &iwl4965_lib,
39319 .hcmd = &iwl4965_hcmd,
39320diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39321index 1f423f2..e37c192 100644
39322--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39323+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39324@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39325 },
39326 };
39327
39328-struct iwl_ops iwl5000_ops = {
39329+const struct iwl_ops iwl5000_ops = {
39330 .ucode = &iwl5000_ucode,
39331 .lib = &iwl5000_lib,
39332 .hcmd = &iwl5000_hcmd,
39333 .utils = &iwl5000_hcmd_utils,
39334 };
39335
39336-static struct iwl_ops iwl5150_ops = {
39337+static const struct iwl_ops iwl5150_ops = {
39338 .ucode = &iwl5000_ucode,
39339 .lib = &iwl5150_lib,
39340 .hcmd = &iwl5000_hcmd,
39341diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39342index 1473452..f07d5e1 100644
39343--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39344+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39345@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39346 .calc_rssi = iwl5000_calc_rssi,
39347 };
39348
39349-static struct iwl_ops iwl6000_ops = {
39350+static const struct iwl_ops iwl6000_ops = {
39351 .ucode = &iwl5000_ucode,
39352 .lib = &iwl6000_lib,
39353 .hcmd = &iwl5000_hcmd,
39354diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39355index 1a3dfa2..b3e0a61 100644
39356--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39357+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39358@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39359 u8 active_index = 0;
39360 s32 tpt = 0;
39361
39362+ pax_track_stack();
39363+
39364 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39365
39366 if (!ieee80211_is_data(hdr->frame_control) ||
39367@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39368 u8 valid_tx_ant = 0;
39369 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39370
39371+ pax_track_stack();
39372+
39373 /* Override starting rate (index 0) if needed for debug purposes */
39374 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39375
39376diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39377index 0e56d78..6a3c107 100644
39378--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39379+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39380@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39381 if (iwl_debug_level & IWL_DL_INFO)
39382 dev_printk(KERN_DEBUG, &(pdev->dev),
39383 "Disabling hw_scan\n");
39384- iwl_hw_ops.hw_scan = NULL;
39385+ pax_open_kernel();
39386+ *(void **)&iwl_hw_ops.hw_scan = NULL;
39387+ pax_close_kernel();
39388 }
39389
39390 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39391diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39392index cbc6290..eb323d7 100644
39393--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39394+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39395@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39396 #endif
39397
39398 #else
39399-#define IWL_DEBUG(__priv, level, fmt, args...)
39400-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39401+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39402+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39403 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39404 void *p, u32 len)
39405 {}
39406diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39407index a198bcf..8e68233 100644
39408--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39409+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39410@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39411 int pos = 0;
39412 const size_t bufsz = sizeof(buf);
39413
39414+ pax_track_stack();
39415+
39416 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39417 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39418 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39419@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39420 const size_t bufsz = sizeof(buf);
39421 ssize_t ret;
39422
39423+ pax_track_stack();
39424+
39425 for (i = 0; i < AC_NUM; i++) {
39426 pos += scnprintf(buf + pos, bufsz - pos,
39427 "\tcw_min\tcw_max\taifsn\ttxop\n");
39428diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39429index 3539ea4..b174bfa 100644
39430--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39431+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39432@@ -68,7 +68,7 @@ struct iwl_tx_queue;
39433
39434 /* shared structures from iwl-5000.c */
39435 extern struct iwl_mod_params iwl50_mod_params;
39436-extern struct iwl_ops iwl5000_ops;
39437+extern const struct iwl_ops iwl5000_ops;
39438 extern struct iwl_ucode_ops iwl5000_ucode;
39439 extern struct iwl_lib_ops iwl5000_lib;
39440 extern struct iwl_hcmd_ops iwl5000_hcmd;
39441diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39442index 619590d..69235ee 100644
39443--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39444+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39445@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39446 */
39447 if (iwl3945_mod_params.disable_hw_scan) {
39448 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39449- iwl3945_hw_ops.hw_scan = NULL;
39450+ pax_open_kernel();
39451+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39452+ pax_close_kernel();
39453 }
39454
39455
39456diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39457index 1465379..fe4d78b 100644
39458--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39459+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39460@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39461 int buf_len = 512;
39462 size_t len = 0;
39463
39464+ pax_track_stack();
39465+
39466 if (*ppos != 0)
39467 return 0;
39468 if (count < sizeof(buf))
39469diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39470index 893a55c..7f66a50 100644
39471--- a/drivers/net/wireless/libertas/debugfs.c
39472+++ b/drivers/net/wireless/libertas/debugfs.c
39473@@ -708,7 +708,7 @@ out_unlock:
39474 struct lbs_debugfs_files {
39475 const char *name;
39476 int perm;
39477- struct file_operations fops;
39478+ const struct file_operations fops;
39479 };
39480
39481 static const struct lbs_debugfs_files debugfs_files[] = {
39482diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39483index 2ecbedb..42704f0 100644
39484--- a/drivers/net/wireless/rndis_wlan.c
39485+++ b/drivers/net/wireless/rndis_wlan.c
39486@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39487
39488 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39489
39490- if (rts_threshold < 0 || rts_threshold > 2347)
39491+ if (rts_threshold > 2347)
39492 rts_threshold = 2347;
39493
39494 tmp = cpu_to_le32(rts_threshold);
39495diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39496index 5c4df24..3b42925 100644
39497--- a/drivers/oprofile/buffer_sync.c
39498+++ b/drivers/oprofile/buffer_sync.c
39499@@ -341,7 +341,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39500 if (cookie == NO_COOKIE)
39501 offset = pc;
39502 if (cookie == INVALID_COOKIE) {
39503- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39504+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39505 offset = pc;
39506 }
39507 if (cookie != last_cookie) {
39508@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39509 /* add userspace sample */
39510
39511 if (!mm) {
39512- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39513+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39514 return 0;
39515 }
39516
39517 cookie = lookup_dcookie(mm, s->eip, &offset);
39518
39519 if (cookie == INVALID_COOKIE) {
39520- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39521+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39522 return 0;
39523 }
39524
39525@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
39526 /* ignore backtraces if failed to add a sample */
39527 if (state == sb_bt_start) {
39528 state = sb_bt_ignore;
39529- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39530+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39531 }
39532 }
39533 release_mm(mm);
39534diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39535index 5df60a6..72f5c1c 100644
39536--- a/drivers/oprofile/event_buffer.c
39537+++ b/drivers/oprofile/event_buffer.c
39538@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39539 }
39540
39541 if (buffer_pos == buffer_size) {
39542- atomic_inc(&oprofile_stats.event_lost_overflow);
39543+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39544 return;
39545 }
39546
39547diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39548index dc8a042..fe5f315 100644
39549--- a/drivers/oprofile/oprof.c
39550+++ b/drivers/oprofile/oprof.c
39551@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39552 if (oprofile_ops.switch_events())
39553 return;
39554
39555- atomic_inc(&oprofile_stats.multiplex_counter);
39556+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39557 start_switch_worker();
39558 }
39559
39560diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39561index 61689e8..387f7f8 100644
39562--- a/drivers/oprofile/oprofile_stats.c
39563+++ b/drivers/oprofile/oprofile_stats.c
39564@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39565 cpu_buf->sample_invalid_eip = 0;
39566 }
39567
39568- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39569- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39570- atomic_set(&oprofile_stats.event_lost_overflow, 0);
39571- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39572- atomic_set(&oprofile_stats.multiplex_counter, 0);
39573+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39574+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39575+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39576+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39577+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39578 }
39579
39580
39581diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39582index 0b54e46..a37c527 100644
39583--- a/drivers/oprofile/oprofile_stats.h
39584+++ b/drivers/oprofile/oprofile_stats.h
39585@@ -13,11 +13,11 @@
39586 #include <asm/atomic.h>
39587
39588 struct oprofile_stat_struct {
39589- atomic_t sample_lost_no_mm;
39590- atomic_t sample_lost_no_mapping;
39591- atomic_t bt_lost_no_mapping;
39592- atomic_t event_lost_overflow;
39593- atomic_t multiplex_counter;
39594+ atomic_unchecked_t sample_lost_no_mm;
39595+ atomic_unchecked_t sample_lost_no_mapping;
39596+ atomic_unchecked_t bt_lost_no_mapping;
39597+ atomic_unchecked_t event_lost_overflow;
39598+ atomic_unchecked_t multiplex_counter;
39599 };
39600
39601 extern struct oprofile_stat_struct oprofile_stats;
39602diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39603index 2766a6d..80c77e2 100644
39604--- a/drivers/oprofile/oprofilefs.c
39605+++ b/drivers/oprofile/oprofilefs.c
39606@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39607
39608
39609 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39610- char const *name, atomic_t *val)
39611+ char const *name, atomic_unchecked_t *val)
39612 {
39613 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39614 &atomic_ro_fops, 0444);
39615diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39616index 13a64bc..ad62835 100644
39617--- a/drivers/parisc/pdc_stable.c
39618+++ b/drivers/parisc/pdc_stable.c
39619@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39620 return ret;
39621 }
39622
39623-static struct sysfs_ops pdcspath_attr_ops = {
39624+static const struct sysfs_ops pdcspath_attr_ops = {
39625 .show = pdcspath_attr_show,
39626 .store = pdcspath_attr_store,
39627 };
39628diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39629index 8eefe56..40751a7 100644
39630--- a/drivers/parport/procfs.c
39631+++ b/drivers/parport/procfs.c
39632@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
39633
39634 *ppos += len;
39635
39636- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
39637+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
39638 }
39639
39640 #ifdef CONFIG_PARPORT_1284
39641@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
39642
39643 *ppos += len;
39644
39645- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
39646+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
39647 }
39648 #endif /* IEEE1284.3 support. */
39649
39650diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
39651index 73e7d8e..c80f3d2 100644
39652--- a/drivers/pci/hotplug/acpiphp_glue.c
39653+++ b/drivers/pci/hotplug/acpiphp_glue.c
39654@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
39655 }
39656
39657
39658-static struct acpi_dock_ops acpiphp_dock_ops = {
39659+static const struct acpi_dock_ops acpiphp_dock_ops = {
39660 .handler = handle_hotplug_event_func,
39661 };
39662
39663diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
39664index 9fff878..ad0ad53 100644
39665--- a/drivers/pci/hotplug/cpci_hotplug.h
39666+++ b/drivers/pci/hotplug/cpci_hotplug.h
39667@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
39668 int (*hardware_test) (struct slot* slot, u32 value);
39669 u8 (*get_power) (struct slot* slot);
39670 int (*set_power) (struct slot* slot, int value);
39671-};
39672+} __no_const;
39673
39674 struct cpci_hp_controller {
39675 unsigned int irq;
39676diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
39677index 76ba8a1..20ca857 100644
39678--- a/drivers/pci/hotplug/cpqphp_nvram.c
39679+++ b/drivers/pci/hotplug/cpqphp_nvram.c
39680@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
39681
39682 void compaq_nvram_init (void __iomem *rom_start)
39683 {
39684+
39685+#ifndef CONFIG_PAX_KERNEXEC
39686 if (rom_start) {
39687 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
39688 }
39689+#endif
39690+
39691 dbg("int15 entry = %p\n", compaq_int15_entry_point);
39692
39693 /* initialize our int15 lock */
39694diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
39695index 6151389..0a894ef 100644
39696--- a/drivers/pci/hotplug/fakephp.c
39697+++ b/drivers/pci/hotplug/fakephp.c
39698@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
39699 }
39700
39701 static struct kobj_type legacy_ktype = {
39702- .sysfs_ops = &(struct sysfs_ops){
39703+ .sysfs_ops = &(const struct sysfs_ops){
39704 .store = legacy_store, .show = legacy_show
39705 },
39706 .release = &legacy_release,
39707diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
39708index 5b680df..fe05b7e 100644
39709--- a/drivers/pci/intel-iommu.c
39710+++ b/drivers/pci/intel-iommu.c
39711@@ -2643,7 +2643,7 @@ error:
39712 return 0;
39713 }
39714
39715-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
39716+dma_addr_t intel_map_page(struct device *dev, struct page *page,
39717 unsigned long offset, size_t size,
39718 enum dma_data_direction dir,
39719 struct dma_attrs *attrs)
39720@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
39721 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
39722 }
39723
39724-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39725+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39726 size_t size, enum dma_data_direction dir,
39727 struct dma_attrs *attrs)
39728 {
39729@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39730 }
39731 }
39732
39733-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39734+void *intel_alloc_coherent(struct device *hwdev, size_t size,
39735 dma_addr_t *dma_handle, gfp_t flags)
39736 {
39737 void *vaddr;
39738@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39739 return NULL;
39740 }
39741
39742-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39743+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39744 dma_addr_t dma_handle)
39745 {
39746 int order;
39747@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39748 free_pages((unsigned long)vaddr, order);
39749 }
39750
39751-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39752+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39753 int nelems, enum dma_data_direction dir,
39754 struct dma_attrs *attrs)
39755 {
39756@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
39757 return nelems;
39758 }
39759
39760-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
39761+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
39762 enum dma_data_direction dir, struct dma_attrs *attrs)
39763 {
39764 int i;
39765@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
39766 return nelems;
39767 }
39768
39769-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
39770+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
39771 {
39772 return !dma_addr;
39773 }
39774
39775-struct dma_map_ops intel_dma_ops = {
39776+const struct dma_map_ops intel_dma_ops = {
39777 .alloc_coherent = intel_alloc_coherent,
39778 .free_coherent = intel_free_coherent,
39779 .map_sg = intel_map_sg,
39780diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
39781index 5b7056c..607bc94 100644
39782--- a/drivers/pci/pcie/aspm.c
39783+++ b/drivers/pci/pcie/aspm.c
39784@@ -27,9 +27,9 @@
39785 #define MODULE_PARAM_PREFIX "pcie_aspm."
39786
39787 /* Note: those are not register definitions */
39788-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
39789-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
39790-#define ASPM_STATE_L1 (4) /* L1 state */
39791+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
39792+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
39793+#define ASPM_STATE_L1 (4U) /* L1 state */
39794 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
39795 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
39796
39797diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
39798index 8105e32..ca10419 100644
39799--- a/drivers/pci/probe.c
39800+++ b/drivers/pci/probe.c
39801@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
39802 return ret;
39803 }
39804
39805-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
39806+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
39807 struct device_attribute *attr,
39808 char *buf)
39809 {
39810 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
39811 }
39812
39813-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
39814+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
39815 struct device_attribute *attr,
39816 char *buf)
39817 {
39818diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
39819index a03ad8c..024b0da 100644
39820--- a/drivers/pci/proc.c
39821+++ b/drivers/pci/proc.c
39822@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
39823 static int __init pci_proc_init(void)
39824 {
39825 struct pci_dev *dev = NULL;
39826+
39827+#ifdef CONFIG_GRKERNSEC_PROC_ADD
39828+#ifdef CONFIG_GRKERNSEC_PROC_USER
39829+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
39830+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39831+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
39832+#endif
39833+#else
39834 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
39835+#endif
39836 proc_create("devices", 0, proc_bus_pci_dir,
39837 &proc_bus_pci_dev_operations);
39838 proc_initialized = 1;
39839diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
39840index 8c02b6c..5584d8e 100644
39841--- a/drivers/pci/slot.c
39842+++ b/drivers/pci/slot.c
39843@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
39844 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
39845 }
39846
39847-static struct sysfs_ops pci_slot_sysfs_ops = {
39848+static const struct sysfs_ops pci_slot_sysfs_ops = {
39849 .show = pci_slot_attr_show,
39850 .store = pci_slot_attr_store,
39851 };
39852diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
39853index 30cf71d2..50938f1 100644
39854--- a/drivers/pcmcia/pcmcia_ioctl.c
39855+++ b/drivers/pcmcia/pcmcia_ioctl.c
39856@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
39857 return -EFAULT;
39858 }
39859 }
39860- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
39861+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
39862 if (!buf)
39863 return -ENOMEM;
39864
39865diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
39866index 52183c4..b224c69 100644
39867--- a/drivers/platform/x86/acer-wmi.c
39868+++ b/drivers/platform/x86/acer-wmi.c
39869@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
39870 return 0;
39871 }
39872
39873-static struct backlight_ops acer_bl_ops = {
39874+static const struct backlight_ops acer_bl_ops = {
39875 .get_brightness = read_brightness,
39876 .update_status = update_bl_status,
39877 };
39878diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
39879index 767cb61..a87380b 100644
39880--- a/drivers/platform/x86/asus-laptop.c
39881+++ b/drivers/platform/x86/asus-laptop.c
39882@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
39883 */
39884 static int read_brightness(struct backlight_device *bd);
39885 static int update_bl_status(struct backlight_device *bd);
39886-static struct backlight_ops asusbl_ops = {
39887+static const struct backlight_ops asusbl_ops = {
39888 .get_brightness = read_brightness,
39889 .update_status = update_bl_status,
39890 };
39891diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
39892index d66c07a..a4abaac 100644
39893--- a/drivers/platform/x86/asus_acpi.c
39894+++ b/drivers/platform/x86/asus_acpi.c
39895@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
39896 return 0;
39897 }
39898
39899-static struct backlight_ops asus_backlight_data = {
39900+static const struct backlight_ops asus_backlight_data = {
39901 .get_brightness = read_brightness,
39902 .update_status = set_brightness_status,
39903 };
39904diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
39905index 11003bb..550ff1b 100644
39906--- a/drivers/platform/x86/compal-laptop.c
39907+++ b/drivers/platform/x86/compal-laptop.c
39908@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
39909 return set_lcd_level(b->props.brightness);
39910 }
39911
39912-static struct backlight_ops compalbl_ops = {
39913+static const struct backlight_ops compalbl_ops = {
39914 .get_brightness = bl_get_brightness,
39915 .update_status = bl_update_status,
39916 };
39917diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
39918index 07a74da..9dc99fa 100644
39919--- a/drivers/platform/x86/dell-laptop.c
39920+++ b/drivers/platform/x86/dell-laptop.c
39921@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
39922 return buffer.output[1];
39923 }
39924
39925-static struct backlight_ops dell_ops = {
39926+static const struct backlight_ops dell_ops = {
39927 .get_brightness = dell_get_intensity,
39928 .update_status = dell_send_intensity,
39929 };
39930diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
39931index c533b1c..5c81f22 100644
39932--- a/drivers/platform/x86/eeepc-laptop.c
39933+++ b/drivers/platform/x86/eeepc-laptop.c
39934@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
39935 */
39936 static int read_brightness(struct backlight_device *bd);
39937 static int update_bl_status(struct backlight_device *bd);
39938-static struct backlight_ops eeepcbl_ops = {
39939+static const struct backlight_ops eeepcbl_ops = {
39940 .get_brightness = read_brightness,
39941 .update_status = update_bl_status,
39942 };
39943diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
39944index bcd4ba8..a249b35 100644
39945--- a/drivers/platform/x86/fujitsu-laptop.c
39946+++ b/drivers/platform/x86/fujitsu-laptop.c
39947@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
39948 return ret;
39949 }
39950
39951-static struct backlight_ops fujitsubl_ops = {
39952+static const struct backlight_ops fujitsubl_ops = {
39953 .get_brightness = bl_get_brightness,
39954 .update_status = bl_update_status,
39955 };
39956diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
39957index 759763d..1093ba2 100644
39958--- a/drivers/platform/x86/msi-laptop.c
39959+++ b/drivers/platform/x86/msi-laptop.c
39960@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
39961 return set_lcd_level(b->props.brightness);
39962 }
39963
39964-static struct backlight_ops msibl_ops = {
39965+static const struct backlight_ops msibl_ops = {
39966 .get_brightness = bl_get_brightness,
39967 .update_status = bl_update_status,
39968 };
39969diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
39970index fe7cf01..9012d8d 100644
39971--- a/drivers/platform/x86/panasonic-laptop.c
39972+++ b/drivers/platform/x86/panasonic-laptop.c
39973@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
39974 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
39975 }
39976
39977-static struct backlight_ops pcc_backlight_ops = {
39978+static const struct backlight_ops pcc_backlight_ops = {
39979 .get_brightness = bl_get,
39980 .update_status = bl_set_status,
39981 };
39982diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
39983index a2a742c..b37e25e 100644
39984--- a/drivers/platform/x86/sony-laptop.c
39985+++ b/drivers/platform/x86/sony-laptop.c
39986@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
39987 }
39988
39989 static struct backlight_device *sony_backlight_device;
39990-static struct backlight_ops sony_backlight_ops = {
39991+static const struct backlight_ops sony_backlight_ops = {
39992 .update_status = sony_backlight_update_status,
39993 .get_brightness = sony_backlight_get_brightness,
39994 };
39995diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
39996index 68271ae..5e8fb10 100644
39997--- a/drivers/platform/x86/thinkpad_acpi.c
39998+++ b/drivers/platform/x86/thinkpad_acpi.c
39999@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40000 return 0;
40001 }
40002
40003-void static hotkey_mask_warn_incomplete_mask(void)
40004+static void hotkey_mask_warn_incomplete_mask(void)
40005 {
40006 /* log only what the user can fix... */
40007 const u32 wantedmask = hotkey_driver_mask &
40008@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40009 BACKLIGHT_UPDATE_HOTKEY);
40010 }
40011
40012-static struct backlight_ops ibm_backlight_data = {
40013+static const struct backlight_ops ibm_backlight_data = {
40014 .get_brightness = brightness_get,
40015 .update_status = brightness_update_status,
40016 };
40017diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40018index 51c0a8b..0786629 100644
40019--- a/drivers/platform/x86/toshiba_acpi.c
40020+++ b/drivers/platform/x86/toshiba_acpi.c
40021@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40022 return AE_OK;
40023 }
40024
40025-static struct backlight_ops toshiba_backlight_data = {
40026+static const struct backlight_ops toshiba_backlight_data = {
40027 .get_brightness = get_lcd,
40028 .update_status = set_lcd_status,
40029 };
40030diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40031index fc83783c..cf370d7 100644
40032--- a/drivers/pnp/pnpbios/bioscalls.c
40033+++ b/drivers/pnp/pnpbios/bioscalls.c
40034@@ -60,7 +60,7 @@ do { \
40035 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40036 } while(0)
40037
40038-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40039+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40040 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40041
40042 /*
40043@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40044
40045 cpu = get_cpu();
40046 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40047+
40048+ pax_open_kernel();
40049 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40050+ pax_close_kernel();
40051
40052 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40053 spin_lock_irqsave(&pnp_bios_lock, flags);
40054@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40055 :"memory");
40056 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40057
40058+ pax_open_kernel();
40059 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40060+ pax_close_kernel();
40061+
40062 put_cpu();
40063
40064 /* If we get here and this is set then the PnP BIOS faulted on us. */
40065@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40066 return status;
40067 }
40068
40069-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40070+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40071 {
40072 int i;
40073
40074@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40075 pnp_bios_callpoint.offset = header->fields.pm16offset;
40076 pnp_bios_callpoint.segment = PNP_CS16;
40077
40078+ pax_open_kernel();
40079+
40080 for_each_possible_cpu(i) {
40081 struct desc_struct *gdt = get_cpu_gdt_table(i);
40082 if (!gdt)
40083@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40084 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40085 (unsigned long)__va(header->fields.pm16dseg));
40086 }
40087+
40088+ pax_close_kernel();
40089 }
40090diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40091index ba97654..66b99d4 100644
40092--- a/drivers/pnp/resource.c
40093+++ b/drivers/pnp/resource.c
40094@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40095 return 1;
40096
40097 /* check if the resource is valid */
40098- if (*irq < 0 || *irq > 15)
40099+ if (*irq > 15)
40100 return 0;
40101
40102 /* check if the resource is reserved */
40103@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40104 return 1;
40105
40106 /* check if the resource is valid */
40107- if (*dma < 0 || *dma == 4 || *dma > 7)
40108+ if (*dma == 4 || *dma > 7)
40109 return 0;
40110
40111 /* check if the resource is reserved */
40112diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40113index 62bb981..24a2dc9 100644
40114--- a/drivers/power/bq27x00_battery.c
40115+++ b/drivers/power/bq27x00_battery.c
40116@@ -44,7 +44,7 @@ struct bq27x00_device_info;
40117 struct bq27x00_access_methods {
40118 int (*read)(u8 reg, int *rt_value, int b_single,
40119 struct bq27x00_device_info *di);
40120-};
40121+} __no_const;
40122
40123 struct bq27x00_device_info {
40124 struct device *dev;
40125diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40126index 62227cd..b5b538b 100644
40127--- a/drivers/rtc/rtc-dev.c
40128+++ b/drivers/rtc/rtc-dev.c
40129@@ -14,6 +14,7 @@
40130 #include <linux/module.h>
40131 #include <linux/rtc.h>
40132 #include <linux/sched.h>
40133+#include <linux/grsecurity.h>
40134 #include "rtc-core.h"
40135
40136 static dev_t rtc_devt;
40137@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40138 if (copy_from_user(&tm, uarg, sizeof(tm)))
40139 return -EFAULT;
40140
40141+ gr_log_timechange();
40142+
40143 return rtc_set_time(rtc, &tm);
40144
40145 case RTC_PIE_ON:
40146diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40147index 968e3c7..fbc637a 100644
40148--- a/drivers/s390/cio/qdio_perf.c
40149+++ b/drivers/s390/cio/qdio_perf.c
40150@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40151 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40152 {
40153 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40154- (long)atomic_long_read(&perf_stats.qdio_int));
40155+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40156 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40157- (long)atomic_long_read(&perf_stats.pci_int));
40158+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40159 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40160- (long)atomic_long_read(&perf_stats.thin_int));
40161+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40162 seq_printf(m, "\n");
40163 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40164- (long)atomic_long_read(&perf_stats.tasklet_inbound));
40165+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40166 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40167- (long)atomic_long_read(&perf_stats.tasklet_outbound));
40168+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40169 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40170- (long)atomic_long_read(&perf_stats.tasklet_thinint),
40171- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40172+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40173+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40174 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40175- (long)atomic_long_read(&perf_stats.thinint_inbound),
40176- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40177+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40178+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40179 seq_printf(m, "\n");
40180 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40181- (long)atomic_long_read(&perf_stats.siga_in));
40182+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40183 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40184- (long)atomic_long_read(&perf_stats.siga_out));
40185+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40186 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40187- (long)atomic_long_read(&perf_stats.siga_sync));
40188+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40189 seq_printf(m, "\n");
40190 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40191- (long)atomic_long_read(&perf_stats.inbound_handler));
40192+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40193 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40194- (long)atomic_long_read(&perf_stats.outbound_handler));
40195+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40196 seq_printf(m, "\n");
40197 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40198- (long)atomic_long_read(&perf_stats.fast_requeue));
40199+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40200 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40201- (long)atomic_long_read(&perf_stats.outbound_target_full));
40202+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40203 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40204- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40205+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40206 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40207- (long)atomic_long_read(&perf_stats.debug_stop_polling));
40208+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40209 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40210- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40211+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40212 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40213- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40214- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40215+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40216+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40217 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40218- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40219- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40220+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40221+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40222 seq_printf(m, "\n");
40223 return 0;
40224 }
40225diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40226index ff4504c..b3604c3 100644
40227--- a/drivers/s390/cio/qdio_perf.h
40228+++ b/drivers/s390/cio/qdio_perf.h
40229@@ -13,46 +13,46 @@
40230
40231 struct qdio_perf_stats {
40232 /* interrupt handler calls */
40233- atomic_long_t qdio_int;
40234- atomic_long_t pci_int;
40235- atomic_long_t thin_int;
40236+ atomic_long_unchecked_t qdio_int;
40237+ atomic_long_unchecked_t pci_int;
40238+ atomic_long_unchecked_t thin_int;
40239
40240 /* tasklet runs */
40241- atomic_long_t tasklet_inbound;
40242- atomic_long_t tasklet_outbound;
40243- atomic_long_t tasklet_thinint;
40244- atomic_long_t tasklet_thinint_loop;
40245- atomic_long_t thinint_inbound;
40246- atomic_long_t thinint_inbound_loop;
40247- atomic_long_t thinint_inbound_loop2;
40248+ atomic_long_unchecked_t tasklet_inbound;
40249+ atomic_long_unchecked_t tasklet_outbound;
40250+ atomic_long_unchecked_t tasklet_thinint;
40251+ atomic_long_unchecked_t tasklet_thinint_loop;
40252+ atomic_long_unchecked_t thinint_inbound;
40253+ atomic_long_unchecked_t thinint_inbound_loop;
40254+ atomic_long_unchecked_t thinint_inbound_loop2;
40255
40256 /* signal adapter calls */
40257- atomic_long_t siga_out;
40258- atomic_long_t siga_in;
40259- atomic_long_t siga_sync;
40260+ atomic_long_unchecked_t siga_out;
40261+ atomic_long_unchecked_t siga_in;
40262+ atomic_long_unchecked_t siga_sync;
40263
40264 /* misc */
40265- atomic_long_t inbound_handler;
40266- atomic_long_t outbound_handler;
40267- atomic_long_t fast_requeue;
40268- atomic_long_t outbound_target_full;
40269+ atomic_long_unchecked_t inbound_handler;
40270+ atomic_long_unchecked_t outbound_handler;
40271+ atomic_long_unchecked_t fast_requeue;
40272+ atomic_long_unchecked_t outbound_target_full;
40273
40274 /* for debugging */
40275- atomic_long_t debug_tl_out_timer;
40276- atomic_long_t debug_stop_polling;
40277- atomic_long_t debug_eqbs_all;
40278- atomic_long_t debug_eqbs_incomplete;
40279- atomic_long_t debug_sqbs_all;
40280- atomic_long_t debug_sqbs_incomplete;
40281+ atomic_long_unchecked_t debug_tl_out_timer;
40282+ atomic_long_unchecked_t debug_stop_polling;
40283+ atomic_long_unchecked_t debug_eqbs_all;
40284+ atomic_long_unchecked_t debug_eqbs_incomplete;
40285+ atomic_long_unchecked_t debug_sqbs_all;
40286+ atomic_long_unchecked_t debug_sqbs_incomplete;
40287 };
40288
40289 extern struct qdio_perf_stats perf_stats;
40290 extern int qdio_performance_stats;
40291
40292-static inline void qdio_perf_stat_inc(atomic_long_t *count)
40293+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40294 {
40295 if (qdio_performance_stats)
40296- atomic_long_inc(count);
40297+ atomic_long_inc_unchecked(count);
40298 }
40299
40300 int qdio_setup_perf_stats(void);
40301diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40302index 1ddcf40..a85f062 100644
40303--- a/drivers/scsi/BusLogic.c
40304+++ b/drivers/scsi/BusLogic.c
40305@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40306 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40307 *PrototypeHostAdapter)
40308 {
40309+ pax_track_stack();
40310+
40311 /*
40312 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40313 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40314diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40315index cdbdec9..b7d560b 100644
40316--- a/drivers/scsi/aacraid/aacraid.h
40317+++ b/drivers/scsi/aacraid/aacraid.h
40318@@ -471,7 +471,7 @@ struct adapter_ops
40319 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40320 /* Administrative operations */
40321 int (*adapter_comm)(struct aac_dev * dev, int comm);
40322-};
40323+} __no_const;
40324
40325 /*
40326 * Define which interrupt handler needs to be installed
40327diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40328index a5b8e7b..a6a0e43 100644
40329--- a/drivers/scsi/aacraid/commctrl.c
40330+++ b/drivers/scsi/aacraid/commctrl.c
40331@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40332 u32 actual_fibsize64, actual_fibsize = 0;
40333 int i;
40334
40335+ pax_track_stack();
40336
40337 if (dev->in_reset) {
40338 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40339diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40340index 9b97c3e..f099725 100644
40341--- a/drivers/scsi/aacraid/linit.c
40342+++ b/drivers/scsi/aacraid/linit.c
40343@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40344 #elif defined(__devinitconst)
40345 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40346 #else
40347-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40348+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40349 #endif
40350 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40351 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40352diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40353index 996f722..9127845 100644
40354--- a/drivers/scsi/aic94xx/aic94xx_init.c
40355+++ b/drivers/scsi/aic94xx/aic94xx_init.c
40356@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40357 flash_error_table[i].reason);
40358 }
40359
40360-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40361+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40362 asd_show_update_bios, asd_store_update_bios);
40363
40364 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40365@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40366 .lldd_control_phy = asd_control_phy,
40367 };
40368
40369-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40370+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40371 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40372 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40373 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40374diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40375index 58efd4b..cb48dc7 100644
40376--- a/drivers/scsi/bfa/bfa_ioc.h
40377+++ b/drivers/scsi/bfa/bfa_ioc.h
40378@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40379 bfa_ioc_disable_cbfn_t disable_cbfn;
40380 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40381 bfa_ioc_reset_cbfn_t reset_cbfn;
40382-};
40383+} __no_const;
40384
40385 /**
40386 * Heartbeat failure notification queue element.
40387diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40388index 7ad177e..5503586 100644
40389--- a/drivers/scsi/bfa/bfa_iocfc.h
40390+++ b/drivers/scsi/bfa/bfa_iocfc.h
40391@@ -61,7 +61,7 @@ struct bfa_hwif_s {
40392 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40393 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40394 u32 *nvecs, u32 *maxvec);
40395-};
40396+} __no_const;
40397 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40398
40399 struct bfa_iocfc_s {
40400diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40401index 4967643..cbec06b 100644
40402--- a/drivers/scsi/dpt_i2o.c
40403+++ b/drivers/scsi/dpt_i2o.c
40404@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40405 dma_addr_t addr;
40406 ulong flags = 0;
40407
40408+ pax_track_stack();
40409+
40410 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40411 // get user msg size in u32s
40412 if(get_user(size, &user_msg[0])){
40413@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40414 s32 rcode;
40415 dma_addr_t addr;
40416
40417+ pax_track_stack();
40418+
40419 memset(msg, 0 , sizeof(msg));
40420 len = scsi_bufflen(cmd);
40421 direction = 0x00000000;
40422diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40423index c7076ce..e20c67c 100644
40424--- a/drivers/scsi/eata.c
40425+++ b/drivers/scsi/eata.c
40426@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40427 struct hostdata *ha;
40428 char name[16];
40429
40430+ pax_track_stack();
40431+
40432 sprintf(name, "%s%d", driver_name, j);
40433
40434 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40435diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40436index 11ae5c9..891daec 100644
40437--- a/drivers/scsi/fcoe/libfcoe.c
40438+++ b/drivers/scsi/fcoe/libfcoe.c
40439@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40440 size_t rlen;
40441 size_t dlen;
40442
40443+ pax_track_stack();
40444+
40445 fiph = (struct fip_header *)skb->data;
40446 sub = fiph->fip_subcode;
40447 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40448diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40449index 71c7bbe..e93088a 100644
40450--- a/drivers/scsi/fnic/fnic_main.c
40451+++ b/drivers/scsi/fnic/fnic_main.c
40452@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40453 /* Start local port initiatialization */
40454
40455 lp->link_up = 0;
40456- lp->tt = fnic_transport_template;
40457+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40458
40459 lp->max_retry_count = fnic->config.flogi_retries;
40460 lp->max_rport_retry_count = fnic->config.plogi_retries;
40461diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40462index bb96d74..9ec3ce4 100644
40463--- a/drivers/scsi/gdth.c
40464+++ b/drivers/scsi/gdth.c
40465@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40466 ulong flags;
40467 gdth_ha_str *ha;
40468
40469+ pax_track_stack();
40470+
40471 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40472 return -EFAULT;
40473 ha = gdth_find_ha(ldrv.ionode);
40474@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40475 gdth_ha_str *ha;
40476 int rval;
40477
40478+ pax_track_stack();
40479+
40480 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40481 res.number >= MAX_HDRIVES)
40482 return -EFAULT;
40483@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40484 gdth_ha_str *ha;
40485 int rval;
40486
40487+ pax_track_stack();
40488+
40489 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40490 return -EFAULT;
40491 ha = gdth_find_ha(gen.ionode);
40492@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40493 int i;
40494 gdth_cmd_str gdtcmd;
40495 char cmnd[MAX_COMMAND_SIZE];
40496+
40497+ pax_track_stack();
40498+
40499 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40500
40501 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40502diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40503index 1258da3..20d8ae6 100644
40504--- a/drivers/scsi/gdth_proc.c
40505+++ b/drivers/scsi/gdth_proc.c
40506@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40507 ulong64 paddr;
40508
40509 char cmnd[MAX_COMMAND_SIZE];
40510+
40511+ pax_track_stack();
40512+
40513 memset(cmnd, 0xff, 12);
40514 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40515
40516@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40517 gdth_hget_str *phg;
40518 char cmnd[MAX_COMMAND_SIZE];
40519
40520+ pax_track_stack();
40521+
40522 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40523 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40524 if (!gdtcmd || !estr)
40525diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40526index d03a926..f324286 100644
40527--- a/drivers/scsi/hosts.c
40528+++ b/drivers/scsi/hosts.c
40529@@ -40,7 +40,7 @@
40530 #include "scsi_logging.h"
40531
40532
40533-static atomic_t scsi_host_next_hn; /* host_no for next new host */
40534+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40535
40536
40537 static void scsi_host_cls_release(struct device *dev)
40538@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40539 * subtract one because we increment first then return, but we need to
40540 * know what the next host number was before increment
40541 */
40542- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40543+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40544 shost->dma_channel = 0xff;
40545
40546 /* These three are default values which can be overridden */
40547diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40548index a601159..55e19d2 100644
40549--- a/drivers/scsi/ipr.c
40550+++ b/drivers/scsi/ipr.c
40551@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40552 return true;
40553 }
40554
40555-static struct ata_port_operations ipr_sata_ops = {
40556+static const struct ata_port_operations ipr_sata_ops = {
40557 .phy_reset = ipr_ata_phy_reset,
40558 .hardreset = ipr_sata_reset,
40559 .post_internal_cmd = ipr_ata_post_internal,
40560diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40561index 4e49fbc..97907ff 100644
40562--- a/drivers/scsi/ips.h
40563+++ b/drivers/scsi/ips.h
40564@@ -1027,7 +1027,7 @@ typedef struct {
40565 int (*intr)(struct ips_ha *);
40566 void (*enableint)(struct ips_ha *);
40567 uint32_t (*statupd)(struct ips_ha *);
40568-} ips_hw_func_t;
40569+} __no_const ips_hw_func_t;
40570
40571 typedef struct ips_ha {
40572 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40573diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40574index c1c1574..a9c9348 100644
40575--- a/drivers/scsi/libfc/fc_exch.c
40576+++ b/drivers/scsi/libfc/fc_exch.c
40577@@ -86,12 +86,12 @@ struct fc_exch_mgr {
40578 * all together if not used XXX
40579 */
40580 struct {
40581- atomic_t no_free_exch;
40582- atomic_t no_free_exch_xid;
40583- atomic_t xid_not_found;
40584- atomic_t xid_busy;
40585- atomic_t seq_not_found;
40586- atomic_t non_bls_resp;
40587+ atomic_unchecked_t no_free_exch;
40588+ atomic_unchecked_t no_free_exch_xid;
40589+ atomic_unchecked_t xid_not_found;
40590+ atomic_unchecked_t xid_busy;
40591+ atomic_unchecked_t seq_not_found;
40592+ atomic_unchecked_t non_bls_resp;
40593 } stats;
40594 };
40595 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40596@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40597 /* allocate memory for exchange */
40598 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40599 if (!ep) {
40600- atomic_inc(&mp->stats.no_free_exch);
40601+ atomic_inc_unchecked(&mp->stats.no_free_exch);
40602 goto out;
40603 }
40604 memset(ep, 0, sizeof(*ep));
40605@@ -557,7 +557,7 @@ out:
40606 return ep;
40607 err:
40608 spin_unlock_bh(&pool->lock);
40609- atomic_inc(&mp->stats.no_free_exch_xid);
40610+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40611 mempool_free(ep, mp->ep_pool);
40612 return NULL;
40613 }
40614@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40615 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40616 ep = fc_exch_find(mp, xid);
40617 if (!ep) {
40618- atomic_inc(&mp->stats.xid_not_found);
40619+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40620 reject = FC_RJT_OX_ID;
40621 goto out;
40622 }
40623@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40624 ep = fc_exch_find(mp, xid);
40625 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40626 if (ep) {
40627- atomic_inc(&mp->stats.xid_busy);
40628+ atomic_inc_unchecked(&mp->stats.xid_busy);
40629 reject = FC_RJT_RX_ID;
40630 goto rel;
40631 }
40632@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40633 }
40634 xid = ep->xid; /* get our XID */
40635 } else if (!ep) {
40636- atomic_inc(&mp->stats.xid_not_found);
40637+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40638 reject = FC_RJT_RX_ID; /* XID not found */
40639 goto out;
40640 }
40641@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40642 } else {
40643 sp = &ep->seq;
40644 if (sp->id != fh->fh_seq_id) {
40645- atomic_inc(&mp->stats.seq_not_found);
40646+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40647 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
40648 goto rel;
40649 }
40650@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40651
40652 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
40653 if (!ep) {
40654- atomic_inc(&mp->stats.xid_not_found);
40655+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40656 goto out;
40657 }
40658 if (ep->esb_stat & ESB_ST_COMPLETE) {
40659- atomic_inc(&mp->stats.xid_not_found);
40660+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40661 goto out;
40662 }
40663 if (ep->rxid == FC_XID_UNKNOWN)
40664 ep->rxid = ntohs(fh->fh_rx_id);
40665 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
40666- atomic_inc(&mp->stats.xid_not_found);
40667+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40668 goto rel;
40669 }
40670 if (ep->did != ntoh24(fh->fh_s_id) &&
40671 ep->did != FC_FID_FLOGI) {
40672- atomic_inc(&mp->stats.xid_not_found);
40673+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40674 goto rel;
40675 }
40676 sof = fr_sof(fp);
40677@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40678 } else {
40679 sp = &ep->seq;
40680 if (sp->id != fh->fh_seq_id) {
40681- atomic_inc(&mp->stats.seq_not_found);
40682+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40683 goto rel;
40684 }
40685 }
40686@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40687 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
40688
40689 if (!sp)
40690- atomic_inc(&mp->stats.xid_not_found);
40691+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40692 else
40693- atomic_inc(&mp->stats.non_bls_resp);
40694+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
40695
40696 fc_frame_free(fp);
40697 }
40698diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
40699index 0ee989f..a582241 100644
40700--- a/drivers/scsi/libsas/sas_ata.c
40701+++ b/drivers/scsi/libsas/sas_ata.c
40702@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
40703 }
40704 }
40705
40706-static struct ata_port_operations sas_sata_ops = {
40707+static const struct ata_port_operations sas_sata_ops = {
40708 .phy_reset = sas_ata_phy_reset,
40709 .post_internal_cmd = sas_ata_post_internal,
40710 .qc_defer = ata_std_qc_defer,
40711diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
40712index aa10f79..5cc79e4 100644
40713--- a/drivers/scsi/lpfc/lpfc.h
40714+++ b/drivers/scsi/lpfc/lpfc.h
40715@@ -400,7 +400,7 @@ struct lpfc_vport {
40716 struct dentry *debug_nodelist;
40717 struct dentry *vport_debugfs_root;
40718 struct lpfc_debugfs_trc *disc_trc;
40719- atomic_t disc_trc_cnt;
40720+ atomic_unchecked_t disc_trc_cnt;
40721 #endif
40722 uint8_t stat_data_enabled;
40723 uint8_t stat_data_blocked;
40724@@ -725,8 +725,8 @@ struct lpfc_hba {
40725 struct timer_list fabric_block_timer;
40726 unsigned long bit_flags;
40727 #define FABRIC_COMANDS_BLOCKED 0
40728- atomic_t num_rsrc_err;
40729- atomic_t num_cmd_success;
40730+ atomic_unchecked_t num_rsrc_err;
40731+ atomic_unchecked_t num_cmd_success;
40732 unsigned long last_rsrc_error_time;
40733 unsigned long last_ramp_down_time;
40734 unsigned long last_ramp_up_time;
40735@@ -740,7 +740,7 @@ struct lpfc_hba {
40736 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
40737 struct dentry *debug_slow_ring_trc;
40738 struct lpfc_debugfs_trc *slow_ring_trc;
40739- atomic_t slow_ring_trc_cnt;
40740+ atomic_unchecked_t slow_ring_trc_cnt;
40741 #endif
40742
40743 /* Used for deferred freeing of ELS data buffers */
40744diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
40745index 8d0f0de..7c77a62 100644
40746--- a/drivers/scsi/lpfc/lpfc_debugfs.c
40747+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
40748@@ -124,7 +124,7 @@ struct lpfc_debug {
40749 int len;
40750 };
40751
40752-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40753+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40754 static unsigned long lpfc_debugfs_start_time = 0L;
40755
40756 /**
40757@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
40758 lpfc_debugfs_enable = 0;
40759
40760 len = 0;
40761- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
40762+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
40763 (lpfc_debugfs_max_disc_trc - 1);
40764 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
40765 dtp = vport->disc_trc + i;
40766@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
40767 lpfc_debugfs_enable = 0;
40768
40769 len = 0;
40770- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
40771+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
40772 (lpfc_debugfs_max_slow_ring_trc - 1);
40773 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
40774 dtp = phba->slow_ring_trc + i;
40775@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
40776 uint32_t *ptr;
40777 char buffer[1024];
40778
40779+ pax_track_stack();
40780+
40781 off = 0;
40782 spin_lock_irq(&phba->hbalock);
40783
40784@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
40785 !vport || !vport->disc_trc)
40786 return;
40787
40788- index = atomic_inc_return(&vport->disc_trc_cnt) &
40789+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
40790 (lpfc_debugfs_max_disc_trc - 1);
40791 dtp = vport->disc_trc + index;
40792 dtp->fmt = fmt;
40793 dtp->data1 = data1;
40794 dtp->data2 = data2;
40795 dtp->data3 = data3;
40796- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
40797+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
40798 dtp->jif = jiffies;
40799 #endif
40800 return;
40801@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
40802 !phba || !phba->slow_ring_trc)
40803 return;
40804
40805- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
40806+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
40807 (lpfc_debugfs_max_slow_ring_trc - 1);
40808 dtp = phba->slow_ring_trc + index;
40809 dtp->fmt = fmt;
40810 dtp->data1 = data1;
40811 dtp->data2 = data2;
40812 dtp->data3 = data3;
40813- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
40814+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
40815 dtp->jif = jiffies;
40816 #endif
40817 return;
40818@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
40819 "slow_ring buffer\n");
40820 goto debug_failed;
40821 }
40822- atomic_set(&phba->slow_ring_trc_cnt, 0);
40823+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
40824 memset(phba->slow_ring_trc, 0,
40825 (sizeof(struct lpfc_debugfs_trc) *
40826 lpfc_debugfs_max_slow_ring_trc));
40827@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
40828 "buffer\n");
40829 goto debug_failed;
40830 }
40831- atomic_set(&vport->disc_trc_cnt, 0);
40832+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
40833
40834 snprintf(name, sizeof(name), "discovery_trace");
40835 vport->debug_disc_trc =
40836diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
40837index 549bc7d..8189dbb 100644
40838--- a/drivers/scsi/lpfc/lpfc_init.c
40839+++ b/drivers/scsi/lpfc/lpfc_init.c
40840@@ -8021,8 +8021,10 @@ lpfc_init(void)
40841 printk(LPFC_COPYRIGHT "\n");
40842
40843 if (lpfc_enable_npiv) {
40844- lpfc_transport_functions.vport_create = lpfc_vport_create;
40845- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
40846+ pax_open_kernel();
40847+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
40848+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
40849+ pax_close_kernel();
40850 }
40851 lpfc_transport_template =
40852 fc_attach_transport(&lpfc_transport_functions);
40853diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
40854index c88f59f..ff2a42f 100644
40855--- a/drivers/scsi/lpfc/lpfc_scsi.c
40856+++ b/drivers/scsi/lpfc/lpfc_scsi.c
40857@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
40858 uint32_t evt_posted;
40859
40860 spin_lock_irqsave(&phba->hbalock, flags);
40861- atomic_inc(&phba->num_rsrc_err);
40862+ atomic_inc_unchecked(&phba->num_rsrc_err);
40863 phba->last_rsrc_error_time = jiffies;
40864
40865 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
40866@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
40867 unsigned long flags;
40868 struct lpfc_hba *phba = vport->phba;
40869 uint32_t evt_posted;
40870- atomic_inc(&phba->num_cmd_success);
40871+ atomic_inc_unchecked(&phba->num_cmd_success);
40872
40873 if (vport->cfg_lun_queue_depth <= queue_depth)
40874 return;
40875@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
40876 int i;
40877 struct lpfc_rport_data *rdata;
40878
40879- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
40880- num_cmd_success = atomic_read(&phba->num_cmd_success);
40881+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
40882+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
40883
40884 vports = lpfc_create_vport_work_array(phba);
40885 if (vports != NULL)
40886@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
40887 }
40888 }
40889 lpfc_destroy_vport_work_array(phba, vports);
40890- atomic_set(&phba->num_rsrc_err, 0);
40891- atomic_set(&phba->num_cmd_success, 0);
40892+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
40893+ atomic_set_unchecked(&phba->num_cmd_success, 0);
40894 }
40895
40896 /**
40897@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
40898 }
40899 }
40900 lpfc_destroy_vport_work_array(phba, vports);
40901- atomic_set(&phba->num_rsrc_err, 0);
40902- atomic_set(&phba->num_cmd_success, 0);
40903+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
40904+ atomic_set_unchecked(&phba->num_cmd_success, 0);
40905 }
40906
40907 /**
40908diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
40909index 234f0b7..3020aea 100644
40910--- a/drivers/scsi/megaraid/megaraid_mbox.c
40911+++ b/drivers/scsi/megaraid/megaraid_mbox.c
40912@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
40913 int rval;
40914 int i;
40915
40916+ pax_track_stack();
40917+
40918 // Allocate memory for the base list of scb for management module.
40919 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
40920
40921diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
40922index 7a117c1..ee01e9e 100644
40923--- a/drivers/scsi/osd/osd_initiator.c
40924+++ b/drivers/scsi/osd/osd_initiator.c
40925@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
40926 int nelem = ARRAY_SIZE(get_attrs), a = 0;
40927 int ret;
40928
40929+ pax_track_stack();
40930+
40931 or = osd_start_request(od, GFP_KERNEL);
40932 if (!or)
40933 return -ENOMEM;
40934diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
40935index 9ab8c86..9425ad3 100644
40936--- a/drivers/scsi/pmcraid.c
40937+++ b/drivers/scsi/pmcraid.c
40938@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
40939 res->scsi_dev = scsi_dev;
40940 scsi_dev->hostdata = res;
40941 res->change_detected = 0;
40942- atomic_set(&res->read_failures, 0);
40943- atomic_set(&res->write_failures, 0);
40944+ atomic_set_unchecked(&res->read_failures, 0);
40945+ atomic_set_unchecked(&res->write_failures, 0);
40946 rc = 0;
40947 }
40948 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
40949@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
40950
40951 /* If this was a SCSI read/write command keep count of errors */
40952 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
40953- atomic_inc(&res->read_failures);
40954+ atomic_inc_unchecked(&res->read_failures);
40955 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
40956- atomic_inc(&res->write_failures);
40957+ atomic_inc_unchecked(&res->write_failures);
40958
40959 if (!RES_IS_GSCSI(res->cfg_entry) &&
40960 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
40961@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
40962
40963 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
40964 /* add resources only after host is added into system */
40965- if (!atomic_read(&pinstance->expose_resources))
40966+ if (!atomic_read_unchecked(&pinstance->expose_resources))
40967 return;
40968
40969 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
40970@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
40971 init_waitqueue_head(&pinstance->reset_wait_q);
40972
40973 atomic_set(&pinstance->outstanding_cmds, 0);
40974- atomic_set(&pinstance->expose_resources, 0);
40975+ atomic_set_unchecked(&pinstance->expose_resources, 0);
40976
40977 INIT_LIST_HEAD(&pinstance->free_res_q);
40978 INIT_LIST_HEAD(&pinstance->used_res_q);
40979@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
40980 /* Schedule worker thread to handle CCN and take care of adding and
40981 * removing devices to OS
40982 */
40983- atomic_set(&pinstance->expose_resources, 1);
40984+ atomic_set_unchecked(&pinstance->expose_resources, 1);
40985 schedule_work(&pinstance->worker_q);
40986 return rc;
40987
40988diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
40989index 3441b3f..6cbe8f7 100644
40990--- a/drivers/scsi/pmcraid.h
40991+++ b/drivers/scsi/pmcraid.h
40992@@ -690,7 +690,7 @@ struct pmcraid_instance {
40993 atomic_t outstanding_cmds;
40994
40995 /* should add/delete resources to mid-layer now ?*/
40996- atomic_t expose_resources;
40997+ atomic_unchecked_t expose_resources;
40998
40999 /* Tasklet to handle deferred processing */
41000 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41001@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41002 struct list_head queue; /* link to "to be exposed" resources */
41003 struct pmcraid_config_table_entry cfg_entry;
41004 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41005- atomic_t read_failures; /* count of failed READ commands */
41006- atomic_t write_failures; /* count of failed WRITE commands */
41007+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41008+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41009
41010 /* To indicate add/delete/modify during CCN */
41011 u8 change_detected;
41012diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41013index 2150618..7034215 100644
41014--- a/drivers/scsi/qla2xxx/qla_def.h
41015+++ b/drivers/scsi/qla2xxx/qla_def.h
41016@@ -2089,7 +2089,7 @@ struct isp_operations {
41017
41018 int (*get_flash_version) (struct scsi_qla_host *, void *);
41019 int (*start_scsi) (srb_t *);
41020-};
41021+} __no_const;
41022
41023 /* MSI-X Support *************************************************************/
41024
41025diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41026index 81b5f29..2ae1fad 100644
41027--- a/drivers/scsi/qla4xxx/ql4_def.h
41028+++ b/drivers/scsi/qla4xxx/ql4_def.h
41029@@ -240,7 +240,7 @@ struct ddb_entry {
41030 atomic_t retry_relogin_timer; /* Min Time between relogins
41031 * (4000 only) */
41032 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41033- atomic_t relogin_retry_count; /* Num of times relogin has been
41034+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41035 * retried */
41036
41037 uint16_t port;
41038diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41039index af8c323..515dd51 100644
41040--- a/drivers/scsi/qla4xxx/ql4_init.c
41041+++ b/drivers/scsi/qla4xxx/ql4_init.c
41042@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41043 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41044 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41045 atomic_set(&ddb_entry->relogin_timer, 0);
41046- atomic_set(&ddb_entry->relogin_retry_count, 0);
41047+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41048 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41049 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41050 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41051@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41052 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41053 atomic_set(&ddb_entry->port_down_timer,
41054 ha->port_down_retry_count);
41055- atomic_set(&ddb_entry->relogin_retry_count, 0);
41056+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41057 atomic_set(&ddb_entry->relogin_timer, 0);
41058 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41059 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41060diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41061index 83c8b5e..a82b348 100644
41062--- a/drivers/scsi/qla4xxx/ql4_os.c
41063+++ b/drivers/scsi/qla4xxx/ql4_os.c
41064@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41065 ddb_entry->fw_ddb_device_state ==
41066 DDB_DS_SESSION_FAILED) {
41067 /* Reset retry relogin timer */
41068- atomic_inc(&ddb_entry->relogin_retry_count);
41069+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41070 DEBUG2(printk("scsi%ld: index[%d] relogin"
41071 " timed out-retrying"
41072 " relogin (%d)\n",
41073 ha->host_no,
41074 ddb_entry->fw_ddb_index,
41075- atomic_read(&ddb_entry->
41076+ atomic_read_unchecked(&ddb_entry->
41077 relogin_retry_count))
41078 );
41079 start_dpc++;
41080diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41081index dd098ca..686ce01 100644
41082--- a/drivers/scsi/scsi.c
41083+++ b/drivers/scsi/scsi.c
41084@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41085 unsigned long timeout;
41086 int rtn = 0;
41087
41088- atomic_inc(&cmd->device->iorequest_cnt);
41089+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41090
41091 /* check if the device is still usable */
41092 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41093diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41094index bc3e363..e1a8e50 100644
41095--- a/drivers/scsi/scsi_debug.c
41096+++ b/drivers/scsi/scsi_debug.c
41097@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41098 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41099 unsigned char *cmd = (unsigned char *)scp->cmnd;
41100
41101+ pax_track_stack();
41102+
41103 if ((errsts = check_readiness(scp, 1, devip)))
41104 return errsts;
41105 memset(arr, 0, sizeof(arr));
41106@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41107 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41108 unsigned char *cmd = (unsigned char *)scp->cmnd;
41109
41110+ pax_track_stack();
41111+
41112 if ((errsts = check_readiness(scp, 1, devip)))
41113 return errsts;
41114 memset(arr, 0, sizeof(arr));
41115diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41116index 1ae7b7c..0a44924 100644
41117--- a/drivers/scsi/scsi_lib.c
41118+++ b/drivers/scsi/scsi_lib.c
41119@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41120
41121 scsi_init_cmd_errh(cmd);
41122 cmd->result = DID_NO_CONNECT << 16;
41123- atomic_inc(&cmd->device->iorequest_cnt);
41124+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41125
41126 /*
41127 * SCSI request completion path will do scsi_device_unbusy(),
41128@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct request *rq)
41129 */
41130 cmd->serial_number = 0;
41131
41132- atomic_inc(&cmd->device->iodone_cnt);
41133+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41134 if (cmd->result)
41135- atomic_inc(&cmd->device->ioerr_cnt);
41136+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41137
41138 disposition = scsi_decide_disposition(cmd);
41139 if (disposition != SUCCESS &&
41140diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41141index 91a93e0..eae0fe3 100644
41142--- a/drivers/scsi/scsi_sysfs.c
41143+++ b/drivers/scsi/scsi_sysfs.c
41144@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41145 char *buf) \
41146 { \
41147 struct scsi_device *sdev = to_scsi_device(dev); \
41148- unsigned long long count = atomic_read(&sdev->field); \
41149+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41150 return snprintf(buf, 20, "0x%llx\n", count); \
41151 } \
41152 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41153diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41154index 1030327..f91fd30 100644
41155--- a/drivers/scsi/scsi_tgt_lib.c
41156+++ b/drivers/scsi/scsi_tgt_lib.c
41157@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41158 int err;
41159
41160 dprintk("%lx %u\n", uaddr, len);
41161- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41162+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41163 if (err) {
41164 /*
41165 * TODO: need to fixup sg_tablesize, max_segment_size,
41166diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41167index db02e31..1b42ea9 100644
41168--- a/drivers/scsi/scsi_transport_fc.c
41169+++ b/drivers/scsi/scsi_transport_fc.c
41170@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41171 * Netlink Infrastructure
41172 */
41173
41174-static atomic_t fc_event_seq;
41175+static atomic_unchecked_t fc_event_seq;
41176
41177 /**
41178 * fc_get_event_number - Obtain the next sequential FC event number
41179@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41180 u32
41181 fc_get_event_number(void)
41182 {
41183- return atomic_add_return(1, &fc_event_seq);
41184+ return atomic_add_return_unchecked(1, &fc_event_seq);
41185 }
41186 EXPORT_SYMBOL(fc_get_event_number);
41187
41188@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41189 {
41190 int error;
41191
41192- atomic_set(&fc_event_seq, 0);
41193+ atomic_set_unchecked(&fc_event_seq, 0);
41194
41195 error = transport_class_register(&fc_host_class);
41196 if (error)
41197diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41198index de2f8c4..63c5278 100644
41199--- a/drivers/scsi/scsi_transport_iscsi.c
41200+++ b/drivers/scsi/scsi_transport_iscsi.c
41201@@ -81,7 +81,7 @@ struct iscsi_internal {
41202 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41203 };
41204
41205-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41206+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41207 static struct workqueue_struct *iscsi_eh_timer_workq;
41208
41209 /*
41210@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41211 int err;
41212
41213 ihost = shost->shost_data;
41214- session->sid = atomic_add_return(1, &iscsi_session_nr);
41215+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41216
41217 if (id == ISCSI_MAX_TARGET) {
41218 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41219@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41220 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41221 ISCSI_TRANSPORT_VERSION);
41222
41223- atomic_set(&iscsi_session_nr, 0);
41224+ atomic_set_unchecked(&iscsi_session_nr, 0);
41225
41226 err = class_register(&iscsi_transport_class);
41227 if (err)
41228diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41229index 21a045e..ec89e03 100644
41230--- a/drivers/scsi/scsi_transport_srp.c
41231+++ b/drivers/scsi/scsi_transport_srp.c
41232@@ -33,7 +33,7 @@
41233 #include "scsi_transport_srp_internal.h"
41234
41235 struct srp_host_attrs {
41236- atomic_t next_port_id;
41237+ atomic_unchecked_t next_port_id;
41238 };
41239 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41240
41241@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41242 struct Scsi_Host *shost = dev_to_shost(dev);
41243 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41244
41245- atomic_set(&srp_host->next_port_id, 0);
41246+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41247 return 0;
41248 }
41249
41250@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41251 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41252 rport->roles = ids->roles;
41253
41254- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41255+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41256 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41257
41258 transport_setup_device(&rport->dev);
41259diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41260index 040f751..98a5ed2 100644
41261--- a/drivers/scsi/sg.c
41262+++ b/drivers/scsi/sg.c
41263@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41264 sdp->disk->disk_name,
41265 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41266 NULL,
41267- (char *)arg);
41268+ (char __user *)arg);
41269 case BLKTRACESTART:
41270 return blk_trace_startstop(sdp->device->request_queue, 1);
41271 case BLKTRACESTOP:
41272@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41273 const struct file_operations * fops;
41274 };
41275
41276-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41277+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41278 {"allow_dio", &adio_fops},
41279 {"debug", &debug_fops},
41280 {"def_reserved_size", &dressz_fops},
41281@@ -2307,7 +2307,7 @@ sg_proc_init(void)
41282 {
41283 int k, mask;
41284 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41285- struct sg_proc_leaf * leaf;
41286+ const struct sg_proc_leaf * leaf;
41287
41288 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41289 if (!sg_proc_sgp)
41290diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41291index 45374d6..61ee484 100644
41292--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41293+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41294@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41295 int do_iounmap = 0;
41296 int do_disable_device = 1;
41297
41298+ pax_track_stack();
41299+
41300 memset(&sym_dev, 0, sizeof(sym_dev));
41301 memset(&nvram, 0, sizeof(nvram));
41302 sym_dev.pdev = pdev;
41303diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41304index eadc1ab..2d81457 100644
41305--- a/drivers/serial/kgdboc.c
41306+++ b/drivers/serial/kgdboc.c
41307@@ -18,7 +18,7 @@
41308
41309 #define MAX_CONFIG_LEN 40
41310
41311-static struct kgdb_io kgdboc_io_ops;
41312+static const struct kgdb_io kgdboc_io_ops;
41313
41314 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41315 static int configured = -1;
41316@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41317 module_put(THIS_MODULE);
41318 }
41319
41320-static struct kgdb_io kgdboc_io_ops = {
41321+static const struct kgdb_io kgdboc_io_ops = {
41322 .name = "kgdboc",
41323 .read_char = kgdboc_get_char,
41324 .write_char = kgdboc_put_char,
41325diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41326index b76f246..7f41af7 100644
41327--- a/drivers/spi/spi.c
41328+++ b/drivers/spi/spi.c
41329@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41330 EXPORT_SYMBOL_GPL(spi_sync);
41331
41332 /* portable code must never pass more than 32 bytes */
41333-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41334+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41335
41336 static u8 *buf;
41337
41338diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41339index 99010d4..6bad87b 100644
41340--- a/drivers/staging/android/binder.c
41341+++ b/drivers/staging/android/binder.c
41342@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41343 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41344 }
41345
41346-static struct vm_operations_struct binder_vm_ops = {
41347+static const struct vm_operations_struct binder_vm_ops = {
41348 .open = binder_vma_open,
41349 .close = binder_vma_close,
41350 };
41351diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41352index cda26bb..39fed3f 100644
41353--- a/drivers/staging/b3dfg/b3dfg.c
41354+++ b/drivers/staging/b3dfg/b3dfg.c
41355@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41356 return VM_FAULT_NOPAGE;
41357 }
41358
41359-static struct vm_operations_struct b3dfg_vm_ops = {
41360+static const struct vm_operations_struct b3dfg_vm_ops = {
41361 .fault = b3dfg_vma_fault,
41362 };
41363
41364@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41365 return r;
41366 }
41367
41368-static struct file_operations b3dfg_fops = {
41369+static const struct file_operations b3dfg_fops = {
41370 .owner = THIS_MODULE,
41371 .open = b3dfg_open,
41372 .release = b3dfg_release,
41373diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41374index 80a1071..8c14e17 100644
41375--- a/drivers/staging/comedi/comedi_fops.c
41376+++ b/drivers/staging/comedi/comedi_fops.c
41377@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41378 mutex_unlock(&dev->mutex);
41379 }
41380
41381-static struct vm_operations_struct comedi_vm_ops = {
41382+static const struct vm_operations_struct comedi_vm_ops = {
41383 .close = comedi_unmap,
41384 };
41385
41386diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41387index e55a0db..577b776 100644
41388--- a/drivers/staging/dream/qdsp5/adsp_driver.c
41389+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41390@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41391 static dev_t adsp_devno;
41392 static struct class *adsp_class;
41393
41394-static struct file_operations adsp_fops = {
41395+static const struct file_operations adsp_fops = {
41396 .owner = THIS_MODULE,
41397 .open = adsp_open,
41398 .unlocked_ioctl = adsp_ioctl,
41399diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41400index ad2390f..4116ee8 100644
41401--- a/drivers/staging/dream/qdsp5/audio_aac.c
41402+++ b/drivers/staging/dream/qdsp5/audio_aac.c
41403@@ -1022,7 +1022,7 @@ done:
41404 return rc;
41405 }
41406
41407-static struct file_operations audio_aac_fops = {
41408+static const struct file_operations audio_aac_fops = {
41409 .owner = THIS_MODULE,
41410 .open = audio_open,
41411 .release = audio_release,
41412diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41413index cd818a5..870b37b 100644
41414--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41415+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41416@@ -833,7 +833,7 @@ done:
41417 return rc;
41418 }
41419
41420-static struct file_operations audio_amrnb_fops = {
41421+static const struct file_operations audio_amrnb_fops = {
41422 .owner = THIS_MODULE,
41423 .open = audamrnb_open,
41424 .release = audamrnb_release,
41425diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41426index 4b43e18..cedafda 100644
41427--- a/drivers/staging/dream/qdsp5/audio_evrc.c
41428+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41429@@ -805,7 +805,7 @@ dma_fail:
41430 return rc;
41431 }
41432
41433-static struct file_operations audio_evrc_fops = {
41434+static const struct file_operations audio_evrc_fops = {
41435 .owner = THIS_MODULE,
41436 .open = audevrc_open,
41437 .release = audevrc_release,
41438diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41439index 3d950a2..9431118 100644
41440--- a/drivers/staging/dream/qdsp5/audio_in.c
41441+++ b/drivers/staging/dream/qdsp5/audio_in.c
41442@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41443 return 0;
41444 }
41445
41446-static struct file_operations audio_fops = {
41447+static const struct file_operations audio_fops = {
41448 .owner = THIS_MODULE,
41449 .open = audio_in_open,
41450 .release = audio_in_release,
41451@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41452 .unlocked_ioctl = audio_in_ioctl,
41453 };
41454
41455-static struct file_operations audpre_fops = {
41456+static const struct file_operations audpre_fops = {
41457 .owner = THIS_MODULE,
41458 .open = audpre_open,
41459 .unlocked_ioctl = audpre_ioctl,
41460diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41461index b95574f..286c2f4 100644
41462--- a/drivers/staging/dream/qdsp5/audio_mp3.c
41463+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41464@@ -941,7 +941,7 @@ done:
41465 return rc;
41466 }
41467
41468-static struct file_operations audio_mp3_fops = {
41469+static const struct file_operations audio_mp3_fops = {
41470 .owner = THIS_MODULE,
41471 .open = audio_open,
41472 .release = audio_release,
41473diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41474index d1adcf6..f8f9833 100644
41475--- a/drivers/staging/dream/qdsp5/audio_out.c
41476+++ b/drivers/staging/dream/qdsp5/audio_out.c
41477@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41478 return 0;
41479 }
41480
41481-static struct file_operations audio_fops = {
41482+static const struct file_operations audio_fops = {
41483 .owner = THIS_MODULE,
41484 .open = audio_open,
41485 .release = audio_release,
41486@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41487 .unlocked_ioctl = audio_ioctl,
41488 };
41489
41490-static struct file_operations audpp_fops = {
41491+static const struct file_operations audpp_fops = {
41492 .owner = THIS_MODULE,
41493 .open = audpp_open,
41494 .unlocked_ioctl = audpp_ioctl,
41495diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41496index f0f50e3..f6b9dbc 100644
41497--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41498+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41499@@ -816,7 +816,7 @@ err:
41500 return rc;
41501 }
41502
41503-static struct file_operations audio_qcelp_fops = {
41504+static const struct file_operations audio_qcelp_fops = {
41505 .owner = THIS_MODULE,
41506 .open = audqcelp_open,
41507 .release = audqcelp_release,
41508diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41509index 037d7ff..5469ec3 100644
41510--- a/drivers/staging/dream/qdsp5/snd.c
41511+++ b/drivers/staging/dream/qdsp5/snd.c
41512@@ -242,7 +242,7 @@ err:
41513 return rc;
41514 }
41515
41516-static struct file_operations snd_fops = {
41517+static const struct file_operations snd_fops = {
41518 .owner = THIS_MODULE,
41519 .open = snd_open,
41520 .release = snd_release,
41521diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41522index d4e7d88..0ea632a 100644
41523--- a/drivers/staging/dream/smd/smd_qmi.c
41524+++ b/drivers/staging/dream/smd/smd_qmi.c
41525@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41526 return 0;
41527 }
41528
41529-static struct file_operations qmi_fops = {
41530+static const struct file_operations qmi_fops = {
41531 .owner = THIS_MODULE,
41532 .read = qmi_read,
41533 .write = qmi_write,
41534diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41535index cd3910b..ff053d3 100644
41536--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41537+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41538@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41539 return rc;
41540 }
41541
41542-static struct file_operations rpcrouter_server_fops = {
41543+static const struct file_operations rpcrouter_server_fops = {
41544 .owner = THIS_MODULE,
41545 .open = rpcrouter_open,
41546 .release = rpcrouter_release,
41547@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41548 .unlocked_ioctl = rpcrouter_ioctl,
41549 };
41550
41551-static struct file_operations rpcrouter_router_fops = {
41552+static const struct file_operations rpcrouter_router_fops = {
41553 .owner = THIS_MODULE,
41554 .open = rpcrouter_open,
41555 .release = rpcrouter_release,
41556diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41557index c24e4e0..07665be 100644
41558--- a/drivers/staging/dst/dcore.c
41559+++ b/drivers/staging/dst/dcore.c
41560@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41561 return 0;
41562 }
41563
41564-static struct block_device_operations dst_blk_ops = {
41565+static const struct block_device_operations dst_blk_ops = {
41566 .open = dst_bdev_open,
41567 .release = dst_bdev_release,
41568 .owner = THIS_MODULE,
41569@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41570 n->size = ctl->size;
41571
41572 atomic_set(&n->refcnt, 1);
41573- atomic_long_set(&n->gen, 0);
41574+ atomic_long_set_unchecked(&n->gen, 0);
41575 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41576
41577 err = dst_node_sysfs_init(n);
41578diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41579index 557d372..8d84422 100644
41580--- a/drivers/staging/dst/trans.c
41581+++ b/drivers/staging/dst/trans.c
41582@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41583 t->error = 0;
41584 t->retries = 0;
41585 atomic_set(&t->refcnt, 1);
41586- t->gen = atomic_long_inc_return(&n->gen);
41587+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
41588
41589 t->enc = bio_data_dir(bio);
41590 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41591diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41592index 94f7752..d051514 100644
41593--- a/drivers/staging/et131x/et1310_tx.c
41594+++ b/drivers/staging/et131x/et1310_tx.c
41595@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
41596 struct net_device_stats *stats = &etdev->net_stats;
41597
41598 if (pMpTcb->Flags & fMP_DEST_BROAD)
41599- atomic_inc(&etdev->Stats.brdcstxmt);
41600+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
41601 else if (pMpTcb->Flags & fMP_DEST_MULTI)
41602- atomic_inc(&etdev->Stats.multixmt);
41603+ atomic_inc_unchecked(&etdev->Stats.multixmt);
41604 else
41605- atomic_inc(&etdev->Stats.unixmt);
41606+ atomic_inc_unchecked(&etdev->Stats.unixmt);
41607
41608 if (pMpTcb->Packet) {
41609 stats->tx_bytes += pMpTcb->Packet->len;
41610diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
41611index 1dfe06f..f469b4d 100644
41612--- a/drivers/staging/et131x/et131x_adapter.h
41613+++ b/drivers/staging/et131x/et131x_adapter.h
41614@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
41615 * operations
41616 */
41617 u32 unircv; /* # multicast packets received */
41618- atomic_t unixmt; /* # multicast packets for Tx */
41619+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
41620 u32 multircv; /* # multicast packets received */
41621- atomic_t multixmt; /* # multicast packets for Tx */
41622+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
41623 u32 brdcstrcv; /* # broadcast packets received */
41624- atomic_t brdcstxmt; /* # broadcast packets for Tx */
41625+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
41626 u32 norcvbuf; /* # Rx packets discarded */
41627 u32 noxmtbuf; /* # Tx packets discarded */
41628
41629diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
41630index 4bd353a..e28f455 100644
41631--- a/drivers/staging/go7007/go7007-v4l2.c
41632+++ b/drivers/staging/go7007/go7007-v4l2.c
41633@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41634 return 0;
41635 }
41636
41637-static struct vm_operations_struct go7007_vm_ops = {
41638+static const struct vm_operations_struct go7007_vm_ops = {
41639 .open = go7007_vm_open,
41640 .close = go7007_vm_close,
41641 .fault = go7007_vm_fault,
41642diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
41643index 366dc95..b974d87 100644
41644--- a/drivers/staging/hv/Channel.c
41645+++ b/drivers/staging/hv/Channel.c
41646@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
41647
41648 DPRINT_ENTER(VMBUS);
41649
41650- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
41651- atomic_inc(&gVmbusConnection.NextGpadlHandle);
41652+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
41653+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
41654
41655 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
41656 ASSERT(msgInfo != NULL);
41657diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
41658index b12237f..01ae28a 100644
41659--- a/drivers/staging/hv/Hv.c
41660+++ b/drivers/staging/hv/Hv.c
41661@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
41662 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
41663 u32 outputAddressHi = outputAddress >> 32;
41664 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
41665- volatile void *hypercallPage = gHvContext.HypercallPage;
41666+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
41667
41668 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
41669 Control, Input, Output);
41670diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
41671index d089bb1..2ebc158 100644
41672--- a/drivers/staging/hv/VmbusApi.h
41673+++ b/drivers/staging/hv/VmbusApi.h
41674@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
41675 u32 *GpadlHandle);
41676 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
41677 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
41678-};
41679+} __no_const;
41680
41681 /* Base driver object */
41682 struct hv_driver {
41683diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
41684index 5a37cce..6ecc88c 100644
41685--- a/drivers/staging/hv/VmbusPrivate.h
41686+++ b/drivers/staging/hv/VmbusPrivate.h
41687@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
41688 struct VMBUS_CONNECTION {
41689 enum VMBUS_CONNECT_STATE ConnectState;
41690
41691- atomic_t NextGpadlHandle;
41692+ atomic_unchecked_t NextGpadlHandle;
41693
41694 /*
41695 * Represents channel interrupts. Each bit position represents a
41696diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
41697index 871a202..ca50ddf 100644
41698--- a/drivers/staging/hv/blkvsc_drv.c
41699+++ b/drivers/staging/hv/blkvsc_drv.c
41700@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
41701 /* The one and only one */
41702 static struct blkvsc_driver_context g_blkvsc_drv;
41703
41704-static struct block_device_operations block_ops = {
41705+static const struct block_device_operations block_ops = {
41706 .owner = THIS_MODULE,
41707 .open = blkvsc_open,
41708 .release = blkvsc_release,
41709diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
41710index 6acc49a..fbc8d46 100644
41711--- a/drivers/staging/hv/vmbus_drv.c
41712+++ b/drivers/staging/hv/vmbus_drv.c
41713@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41714 to_device_context(root_device_obj);
41715 struct device_context *child_device_ctx =
41716 to_device_context(child_device_obj);
41717- static atomic_t device_num = ATOMIC_INIT(0);
41718+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41719
41720 DPRINT_ENTER(VMBUS_DRV);
41721
41722@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41723
41724 /* Set the device name. Otherwise, device_register() will fail. */
41725 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
41726- atomic_inc_return(&device_num));
41727+ atomic_inc_return_unchecked(&device_num));
41728
41729 /* The new device belongs to this bus */
41730 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
41731diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
41732index d926189..17b19fd 100644
41733--- a/drivers/staging/iio/ring_generic.h
41734+++ b/drivers/staging/iio/ring_generic.h
41735@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
41736
41737 int (*is_enabled)(struct iio_ring_buffer *ring);
41738 int (*enable)(struct iio_ring_buffer *ring);
41739-};
41740+} __no_const;
41741
41742 /**
41743 * struct iio_ring_buffer - general ring buffer structure
41744diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
41745index 1b237b7..88c624e 100644
41746--- a/drivers/staging/octeon/ethernet-rx.c
41747+++ b/drivers/staging/octeon/ethernet-rx.c
41748@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41749 /* Increment RX stats for virtual ports */
41750 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
41751 #ifdef CONFIG_64BIT
41752- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
41753- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
41754+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
41755+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
41756 #else
41757- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
41758- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
41759+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
41760+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
41761 #endif
41762 }
41763 netif_receive_skb(skb);
41764@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41765 dev->name);
41766 */
41767 #ifdef CONFIG_64BIT
41768- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
41769+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
41770 #else
41771- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
41772+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
41773 #endif
41774 dev_kfree_skb_irq(skb);
41775 }
41776diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
41777index 492c502..d9909f1 100644
41778--- a/drivers/staging/octeon/ethernet.c
41779+++ b/drivers/staging/octeon/ethernet.c
41780@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
41781 * since the RX tasklet also increments it.
41782 */
41783 #ifdef CONFIG_64BIT
41784- atomic64_add(rx_status.dropped_packets,
41785- (atomic64_t *)&priv->stats.rx_dropped);
41786+ atomic64_add_unchecked(rx_status.dropped_packets,
41787+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
41788 #else
41789- atomic_add(rx_status.dropped_packets,
41790- (atomic_t *)&priv->stats.rx_dropped);
41791+ atomic_add_unchecked(rx_status.dropped_packets,
41792+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
41793 #endif
41794 }
41795
41796diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
41797index a35bd5d..28fff45 100644
41798--- a/drivers/staging/otus/80211core/pub_zfi.h
41799+++ b/drivers/staging/otus/80211core/pub_zfi.h
41800@@ -531,7 +531,7 @@ struct zsCbFuncTbl
41801 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
41802
41803 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
41804-};
41805+} __no_const;
41806
41807 extern void zfZeroMemory(u8_t* va, u16_t length);
41808 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
41809diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
41810index c39a25f..696f5aa 100644
41811--- a/drivers/staging/panel/panel.c
41812+++ b/drivers/staging/panel/panel.c
41813@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
41814 return 0;
41815 }
41816
41817-static struct file_operations lcd_fops = {
41818+static const struct file_operations lcd_fops = {
41819 .write = lcd_write,
41820 .open = lcd_open,
41821 .release = lcd_release,
41822@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
41823 return 0;
41824 }
41825
41826-static struct file_operations keypad_fops = {
41827+static const struct file_operations keypad_fops = {
41828 .read = keypad_read, /* read */
41829 .open = keypad_open, /* open */
41830 .release = keypad_release, /* close */
41831diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
41832index 270ebcb..37e46af 100644
41833--- a/drivers/staging/phison/phison.c
41834+++ b/drivers/staging/phison/phison.c
41835@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
41836 ATA_BMDMA_SHT(DRV_NAME),
41837 };
41838
41839-static struct ata_port_operations phison_ops = {
41840+static const struct ata_port_operations phison_ops = {
41841 .inherits = &ata_bmdma_port_ops,
41842 .prereset = phison_pre_reset,
41843 };
41844diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
41845index 2eb8e3d..57616a7 100644
41846--- a/drivers/staging/poch/poch.c
41847+++ b/drivers/staging/poch/poch.c
41848@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
41849 return 0;
41850 }
41851
41852-static struct file_operations poch_fops = {
41853+static const struct file_operations poch_fops = {
41854 .owner = THIS_MODULE,
41855 .open = poch_open,
41856 .release = poch_release,
41857diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
41858index c94de31..19402bc 100644
41859--- a/drivers/staging/pohmelfs/inode.c
41860+++ b/drivers/staging/pohmelfs/inode.c
41861@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
41862 mutex_init(&psb->mcache_lock);
41863 psb->mcache_root = RB_ROOT;
41864 psb->mcache_timeout = msecs_to_jiffies(5000);
41865- atomic_long_set(&psb->mcache_gen, 0);
41866+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
41867
41868 psb->trans_max_pages = 100;
41869
41870@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
41871 INIT_LIST_HEAD(&psb->crypto_ready_list);
41872 INIT_LIST_HEAD(&psb->crypto_active_list);
41873
41874- atomic_set(&psb->trans_gen, 1);
41875+ atomic_set_unchecked(&psb->trans_gen, 1);
41876 atomic_long_set(&psb->total_inodes, 0);
41877
41878 mutex_init(&psb->state_lock);
41879diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
41880index e22665c..a2a9390 100644
41881--- a/drivers/staging/pohmelfs/mcache.c
41882+++ b/drivers/staging/pohmelfs/mcache.c
41883@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
41884 m->data = data;
41885 m->start = start;
41886 m->size = size;
41887- m->gen = atomic_long_inc_return(&psb->mcache_gen);
41888+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
41889
41890 mutex_lock(&psb->mcache_lock);
41891 err = pohmelfs_mcache_insert(psb, m);
41892diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
41893index 623a07d..4035c19 100644
41894--- a/drivers/staging/pohmelfs/netfs.h
41895+++ b/drivers/staging/pohmelfs/netfs.h
41896@@ -570,14 +570,14 @@ struct pohmelfs_config;
41897 struct pohmelfs_sb {
41898 struct rb_root mcache_root;
41899 struct mutex mcache_lock;
41900- atomic_long_t mcache_gen;
41901+ atomic_long_unchecked_t mcache_gen;
41902 unsigned long mcache_timeout;
41903
41904 unsigned int idx;
41905
41906 unsigned int trans_retries;
41907
41908- atomic_t trans_gen;
41909+ atomic_unchecked_t trans_gen;
41910
41911 unsigned int crypto_attached_size;
41912 unsigned int crypto_align_size;
41913diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
41914index 36a2535..0591bf4 100644
41915--- a/drivers/staging/pohmelfs/trans.c
41916+++ b/drivers/staging/pohmelfs/trans.c
41917@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
41918 int err;
41919 struct netfs_cmd *cmd = t->iovec.iov_base;
41920
41921- t->gen = atomic_inc_return(&psb->trans_gen);
41922+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
41923
41924 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
41925 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
41926diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
41927index f890a16..509ece8 100644
41928--- a/drivers/staging/sep/sep_driver.c
41929+++ b/drivers/staging/sep/sep_driver.c
41930@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
41931 static dev_t sep_devno;
41932
41933 /* the files operations structure of the driver */
41934-static struct file_operations sep_file_operations = {
41935+static const struct file_operations sep_file_operations = {
41936 .owner = THIS_MODULE,
41937 .ioctl = sep_ioctl,
41938 .poll = sep_poll,
41939diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
41940index 5e16bc3..7655b10 100644
41941--- a/drivers/staging/usbip/usbip_common.h
41942+++ b/drivers/staging/usbip/usbip_common.h
41943@@ -374,7 +374,7 @@ struct usbip_device {
41944 void (*shutdown)(struct usbip_device *);
41945 void (*reset)(struct usbip_device *);
41946 void (*unusable)(struct usbip_device *);
41947- } eh_ops;
41948+ } __no_const eh_ops;
41949 };
41950
41951
41952diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
41953index 57f7946..d9df23d 100644
41954--- a/drivers/staging/usbip/vhci.h
41955+++ b/drivers/staging/usbip/vhci.h
41956@@ -92,7 +92,7 @@ struct vhci_hcd {
41957 unsigned resuming:1;
41958 unsigned long re_timeout;
41959
41960- atomic_t seqnum;
41961+ atomic_unchecked_t seqnum;
41962
41963 /*
41964 * NOTE:
41965diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
41966index 20cd7db..c2693ff 100644
41967--- a/drivers/staging/usbip/vhci_hcd.c
41968+++ b/drivers/staging/usbip/vhci_hcd.c
41969@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
41970 return;
41971 }
41972
41973- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
41974+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
41975 if (priv->seqnum == 0xffff)
41976 usbip_uinfo("seqnum max\n");
41977
41978@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
41979 return -ENOMEM;
41980 }
41981
41982- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
41983+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
41984 if (unlink->seqnum == 0xffff)
41985 usbip_uinfo("seqnum max\n");
41986
41987@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
41988 vdev->rhport = rhport;
41989 }
41990
41991- atomic_set(&vhci->seqnum, 0);
41992+ atomic_set_unchecked(&vhci->seqnum, 0);
41993 spin_lock_init(&vhci->lock);
41994
41995
41996diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
41997index 8ed5206..92469e3 100644
41998--- a/drivers/staging/usbip/vhci_rx.c
41999+++ b/drivers/staging/usbip/vhci_rx.c
42000@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42001 usbip_uerr("cannot find a urb of seqnum %u\n",
42002 pdu->base.seqnum);
42003 usbip_uinfo("max seqnum %d\n",
42004- atomic_read(&the_controller->seqnum));
42005+ atomic_read_unchecked(&the_controller->seqnum));
42006 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42007 return;
42008 }
42009diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42010index 7891288..8e31300 100644
42011--- a/drivers/staging/vme/devices/vme_user.c
42012+++ b/drivers/staging/vme/devices/vme_user.c
42013@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42014 static int __init vme_user_probe(struct device *, int, int);
42015 static int __exit vme_user_remove(struct device *, int, int);
42016
42017-static struct file_operations vme_user_fops = {
42018+static const struct file_operations vme_user_fops = {
42019 .open = vme_user_open,
42020 .release = vme_user_release,
42021 .read = vme_user_read,
42022diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42023index 58abf44..00c1fc8 100644
42024--- a/drivers/staging/vt6655/hostap.c
42025+++ b/drivers/staging/vt6655/hostap.c
42026@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42027 PSDevice apdev_priv;
42028 struct net_device *dev = pDevice->dev;
42029 int ret;
42030- const struct net_device_ops apdev_netdev_ops = {
42031+ net_device_ops_no_const apdev_netdev_ops = {
42032 .ndo_start_xmit = pDevice->tx_80211,
42033 };
42034
42035diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42036index 0c8267a..db1f363 100644
42037--- a/drivers/staging/vt6656/hostap.c
42038+++ b/drivers/staging/vt6656/hostap.c
42039@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42040 PSDevice apdev_priv;
42041 struct net_device *dev = pDevice->dev;
42042 int ret;
42043- const struct net_device_ops apdev_netdev_ops = {
42044+ net_device_ops_no_const apdev_netdev_ops = {
42045 .ndo_start_xmit = pDevice->tx_80211,
42046 };
42047
42048diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42049index 925678b..da7f5ed 100644
42050--- a/drivers/staging/wlan-ng/hfa384x_usb.c
42051+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42052@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42053
42054 struct usbctlx_completor {
42055 int (*complete) (struct usbctlx_completor *);
42056-};
42057+} __no_const;
42058 typedef struct usbctlx_completor usbctlx_completor_t;
42059
42060 static int
42061diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42062index 40de151..924f268 100644
42063--- a/drivers/telephony/ixj.c
42064+++ b/drivers/telephony/ixj.c
42065@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42066 bool mContinue;
42067 char *pIn, *pOut;
42068
42069+ pax_track_stack();
42070+
42071 if (!SCI_Prepare(j))
42072 return 0;
42073
42074diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42075index e941367..b631f5a 100644
42076--- a/drivers/uio/uio.c
42077+++ b/drivers/uio/uio.c
42078@@ -23,6 +23,7 @@
42079 #include <linux/string.h>
42080 #include <linux/kobject.h>
42081 #include <linux/uio_driver.h>
42082+#include <asm/local.h>
42083
42084 #define UIO_MAX_DEVICES 255
42085
42086@@ -30,10 +31,10 @@ struct uio_device {
42087 struct module *owner;
42088 struct device *dev;
42089 int minor;
42090- atomic_t event;
42091+ atomic_unchecked_t event;
42092 struct fasync_struct *async_queue;
42093 wait_queue_head_t wait;
42094- int vma_count;
42095+ local_t vma_count;
42096 struct uio_info *info;
42097 struct kobject *map_dir;
42098 struct kobject *portio_dir;
42099@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42100 return entry->show(mem, buf);
42101 }
42102
42103-static struct sysfs_ops map_sysfs_ops = {
42104+static const struct sysfs_ops map_sysfs_ops = {
42105 .show = map_type_show,
42106 };
42107
42108@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42109 return entry->show(port, buf);
42110 }
42111
42112-static struct sysfs_ops portio_sysfs_ops = {
42113+static const struct sysfs_ops portio_sysfs_ops = {
42114 .show = portio_type_show,
42115 };
42116
42117@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42118 struct uio_device *idev = dev_get_drvdata(dev);
42119 if (idev)
42120 return sprintf(buf, "%u\n",
42121- (unsigned int)atomic_read(&idev->event));
42122+ (unsigned int)atomic_read_unchecked(&idev->event));
42123 else
42124 return -ENODEV;
42125 }
42126@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42127 {
42128 struct uio_device *idev = info->uio_dev;
42129
42130- atomic_inc(&idev->event);
42131+ atomic_inc_unchecked(&idev->event);
42132 wake_up_interruptible(&idev->wait);
42133 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42134 }
42135@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42136 }
42137
42138 listener->dev = idev;
42139- listener->event_count = atomic_read(&idev->event);
42140+ listener->event_count = atomic_read_unchecked(&idev->event);
42141 filep->private_data = listener;
42142
42143 if (idev->info->open) {
42144@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42145 return -EIO;
42146
42147 poll_wait(filep, &idev->wait, wait);
42148- if (listener->event_count != atomic_read(&idev->event))
42149+ if (listener->event_count != atomic_read_unchecked(&idev->event))
42150 return POLLIN | POLLRDNORM;
42151 return 0;
42152 }
42153@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42154 do {
42155 set_current_state(TASK_INTERRUPTIBLE);
42156
42157- event_count = atomic_read(&idev->event);
42158+ event_count = atomic_read_unchecked(&idev->event);
42159 if (event_count != listener->event_count) {
42160 if (copy_to_user(buf, &event_count, count))
42161 retval = -EFAULT;
42162@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42163 static void uio_vma_open(struct vm_area_struct *vma)
42164 {
42165 struct uio_device *idev = vma->vm_private_data;
42166- idev->vma_count++;
42167+ local_inc(&idev->vma_count);
42168 }
42169
42170 static void uio_vma_close(struct vm_area_struct *vma)
42171 {
42172 struct uio_device *idev = vma->vm_private_data;
42173- idev->vma_count--;
42174+ local_dec(&idev->vma_count);
42175 }
42176
42177 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42178@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42179 idev->owner = owner;
42180 idev->info = info;
42181 init_waitqueue_head(&idev->wait);
42182- atomic_set(&idev->event, 0);
42183+ atomic_set_unchecked(&idev->event, 0);
42184
42185 ret = uio_get_minor(idev);
42186 if (ret)
42187diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42188index fbea856..06efea6 100644
42189--- a/drivers/usb/atm/usbatm.c
42190+++ b/drivers/usb/atm/usbatm.c
42191@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42192 if (printk_ratelimit())
42193 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42194 __func__, vpi, vci);
42195- atomic_inc(&vcc->stats->rx_err);
42196+ atomic_inc_unchecked(&vcc->stats->rx_err);
42197 return;
42198 }
42199
42200@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42201 if (length > ATM_MAX_AAL5_PDU) {
42202 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42203 __func__, length, vcc);
42204- atomic_inc(&vcc->stats->rx_err);
42205+ atomic_inc_unchecked(&vcc->stats->rx_err);
42206 goto out;
42207 }
42208
42209@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42210 if (sarb->len < pdu_length) {
42211 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42212 __func__, pdu_length, sarb->len, vcc);
42213- atomic_inc(&vcc->stats->rx_err);
42214+ atomic_inc_unchecked(&vcc->stats->rx_err);
42215 goto out;
42216 }
42217
42218 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42219 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42220 __func__, vcc);
42221- atomic_inc(&vcc->stats->rx_err);
42222+ atomic_inc_unchecked(&vcc->stats->rx_err);
42223 goto out;
42224 }
42225
42226@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42227 if (printk_ratelimit())
42228 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42229 __func__, length);
42230- atomic_inc(&vcc->stats->rx_drop);
42231+ atomic_inc_unchecked(&vcc->stats->rx_drop);
42232 goto out;
42233 }
42234
42235@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42236
42237 vcc->push(vcc, skb);
42238
42239- atomic_inc(&vcc->stats->rx);
42240+ atomic_inc_unchecked(&vcc->stats->rx);
42241 out:
42242 skb_trim(sarb, 0);
42243 }
42244@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42245 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42246
42247 usbatm_pop(vcc, skb);
42248- atomic_inc(&vcc->stats->tx);
42249+ atomic_inc_unchecked(&vcc->stats->tx);
42250
42251 skb = skb_dequeue(&instance->sndqueue);
42252 }
42253@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42254 if (!left--)
42255 return sprintf(page,
42256 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42257- atomic_read(&atm_dev->stats.aal5.tx),
42258- atomic_read(&atm_dev->stats.aal5.tx_err),
42259- atomic_read(&atm_dev->stats.aal5.rx),
42260- atomic_read(&atm_dev->stats.aal5.rx_err),
42261- atomic_read(&atm_dev->stats.aal5.rx_drop));
42262+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42263+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42264+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42265+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42266+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42267
42268 if (!left--) {
42269 if (instance->disconnected)
42270diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
42271index 3e564bf..949b448 100644
42272--- a/drivers/usb/class/cdc-wdm.c
42273+++ b/drivers/usb/class/cdc-wdm.c
42274@@ -314,7 +314,7 @@ static ssize_t wdm_write
42275 if (r < 0)
42276 goto outnp;
42277
42278- if (!file->f_flags && O_NONBLOCK)
42279+ if (!(file->f_flags & O_NONBLOCK))
42280 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
42281 &desc->flags));
42282 else
42283diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42284index 24e6205..fe5a5d4 100644
42285--- a/drivers/usb/core/hcd.c
42286+++ b/drivers/usb/core/hcd.c
42287@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42288
42289 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42290
42291-struct usb_mon_operations *mon_ops;
42292+const struct usb_mon_operations *mon_ops;
42293
42294 /*
42295 * The registration is unlocked.
42296@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42297 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42298 */
42299
42300-int usb_mon_register (struct usb_mon_operations *ops)
42301+int usb_mon_register (const struct usb_mon_operations *ops)
42302 {
42303
42304 if (mon_ops)
42305diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42306index bcbe104..9cfd1c6 100644
42307--- a/drivers/usb/core/hcd.h
42308+++ b/drivers/usb/core/hcd.h
42309@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42310 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42311
42312 struct usb_mon_operations {
42313- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42314- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42315- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42316+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42317+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42318+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42319 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42320 };
42321
42322-extern struct usb_mon_operations *mon_ops;
42323+extern const struct usb_mon_operations *mon_ops;
42324
42325 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42326 {
42327@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42328 (*mon_ops->urb_complete)(bus, urb, status);
42329 }
42330
42331-int usb_mon_register(struct usb_mon_operations *ops);
42332+int usb_mon_register(const struct usb_mon_operations *ops);
42333 void usb_mon_deregister(void);
42334
42335 #else
42336diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42337index 409cc94..a673bad 100644
42338--- a/drivers/usb/core/message.c
42339+++ b/drivers/usb/core/message.c
42340@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42341 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42342 if (buf) {
42343 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42344- if (len > 0) {
42345- smallbuf = kmalloc(++len, GFP_NOIO);
42346+ if (len++ > 0) {
42347+ smallbuf = kmalloc(len, GFP_NOIO);
42348 if (!smallbuf)
42349 return buf;
42350 memcpy(smallbuf, buf, len);
42351diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42352index 62ff5e7..530b74e 100644
42353--- a/drivers/usb/misc/appledisplay.c
42354+++ b/drivers/usb/misc/appledisplay.c
42355@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42356 return pdata->msgdata[1];
42357 }
42358
42359-static struct backlight_ops appledisplay_bl_data = {
42360+static const struct backlight_ops appledisplay_bl_data = {
42361 .get_brightness = appledisplay_bl_get_brightness,
42362 .update_status = appledisplay_bl_update_status,
42363 };
42364diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42365index e0c2db3..bd8cb66 100644
42366--- a/drivers/usb/mon/mon_main.c
42367+++ b/drivers/usb/mon/mon_main.c
42368@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42369 /*
42370 * Ops
42371 */
42372-static struct usb_mon_operations mon_ops_0 = {
42373+static const struct usb_mon_operations mon_ops_0 = {
42374 .urb_submit = mon_submit,
42375 .urb_submit_error = mon_submit_error,
42376 .urb_complete = mon_complete,
42377diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42378index d6bea3e..60b250e 100644
42379--- a/drivers/usb/wusbcore/wa-hc.h
42380+++ b/drivers/usb/wusbcore/wa-hc.h
42381@@ -192,7 +192,7 @@ struct wahc {
42382 struct list_head xfer_delayed_list;
42383 spinlock_t xfer_list_lock;
42384 struct work_struct xfer_work;
42385- atomic_t xfer_id_count;
42386+ atomic_unchecked_t xfer_id_count;
42387 };
42388
42389
42390@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42391 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42392 spin_lock_init(&wa->xfer_list_lock);
42393 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42394- atomic_set(&wa->xfer_id_count, 1);
42395+ atomic_set_unchecked(&wa->xfer_id_count, 1);
42396 }
42397
42398 /**
42399diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42400index 613a5fc..3174865 100644
42401--- a/drivers/usb/wusbcore/wa-xfer.c
42402+++ b/drivers/usb/wusbcore/wa-xfer.c
42403@@ -293,7 +293,7 @@ out:
42404 */
42405 static void wa_xfer_id_init(struct wa_xfer *xfer)
42406 {
42407- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42408+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42409 }
42410
42411 /*
42412diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42413index aa42fce..f8a828c 100644
42414--- a/drivers/uwb/wlp/messages.c
42415+++ b/drivers/uwb/wlp/messages.c
42416@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42417 size_t len = skb->len;
42418 size_t used;
42419 ssize_t result;
42420- struct wlp_nonce enonce, rnonce;
42421+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42422 enum wlp_assc_error assc_err;
42423 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42424 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42425diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42426index 0370399..6627c94 100644
42427--- a/drivers/uwb/wlp/sysfs.c
42428+++ b/drivers/uwb/wlp/sysfs.c
42429@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42430 return ret;
42431 }
42432
42433-static
42434-struct sysfs_ops wss_sysfs_ops = {
42435+static const struct sysfs_ops wss_sysfs_ops = {
42436 .show = wlp_wss_attr_show,
42437 .store = wlp_wss_attr_store,
42438 };
42439diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42440index d5e8010..5687b56 100644
42441--- a/drivers/video/atmel_lcdfb.c
42442+++ b/drivers/video/atmel_lcdfb.c
42443@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42444 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42445 }
42446
42447-static struct backlight_ops atmel_lcdc_bl_ops = {
42448+static const struct backlight_ops atmel_lcdc_bl_ops = {
42449 .update_status = atmel_bl_update_status,
42450 .get_brightness = atmel_bl_get_brightness,
42451 };
42452diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42453index e4e4d43..66bcbcc 100644
42454--- a/drivers/video/aty/aty128fb.c
42455+++ b/drivers/video/aty/aty128fb.c
42456@@ -149,7 +149,7 @@ enum {
42457 };
42458
42459 /* Must match above enum */
42460-static const char *r128_family[] __devinitdata = {
42461+static const char *r128_family[] __devinitconst = {
42462 "AGP",
42463 "PCI",
42464 "PRO AGP",
42465@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42466 return bd->props.brightness;
42467 }
42468
42469-static struct backlight_ops aty128_bl_data = {
42470+static const struct backlight_ops aty128_bl_data = {
42471 .get_brightness = aty128_bl_get_brightness,
42472 .update_status = aty128_bl_update_status,
42473 };
42474diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42475index 913b4a4..9295a38 100644
42476--- a/drivers/video/aty/atyfb_base.c
42477+++ b/drivers/video/aty/atyfb_base.c
42478@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42479 return bd->props.brightness;
42480 }
42481
42482-static struct backlight_ops aty_bl_data = {
42483+static const struct backlight_ops aty_bl_data = {
42484 .get_brightness = aty_bl_get_brightness,
42485 .update_status = aty_bl_update_status,
42486 };
42487diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42488index 1a056ad..221bd6a 100644
42489--- a/drivers/video/aty/radeon_backlight.c
42490+++ b/drivers/video/aty/radeon_backlight.c
42491@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42492 return bd->props.brightness;
42493 }
42494
42495-static struct backlight_ops radeon_bl_data = {
42496+static const struct backlight_ops radeon_bl_data = {
42497 .get_brightness = radeon_bl_get_brightness,
42498 .update_status = radeon_bl_update_status,
42499 };
42500diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42501index ad05da5..3cb2cb9 100644
42502--- a/drivers/video/backlight/adp5520_bl.c
42503+++ b/drivers/video/backlight/adp5520_bl.c
42504@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42505 return error ? data->current_brightness : reg_val;
42506 }
42507
42508-static struct backlight_ops adp5520_bl_ops = {
42509+static const struct backlight_ops adp5520_bl_ops = {
42510 .update_status = adp5520_bl_update_status,
42511 .get_brightness = adp5520_bl_get_brightness,
42512 };
42513diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42514index 2c3bdfc..d769b0b 100644
42515--- a/drivers/video/backlight/adx_bl.c
42516+++ b/drivers/video/backlight/adx_bl.c
42517@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42518 return 1;
42519 }
42520
42521-static struct backlight_ops adx_backlight_ops = {
42522+static const struct backlight_ops adx_backlight_ops = {
42523 .options = 0,
42524 .update_status = adx_backlight_update_status,
42525 .get_brightness = adx_backlight_get_brightness,
42526diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42527index 505c082..6b6b3cc 100644
42528--- a/drivers/video/backlight/atmel-pwm-bl.c
42529+++ b/drivers/video/backlight/atmel-pwm-bl.c
42530@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42531 return pwm_channel_enable(&pwmbl->pwmc);
42532 }
42533
42534-static struct backlight_ops atmel_pwm_bl_ops = {
42535+static const struct backlight_ops atmel_pwm_bl_ops = {
42536 .get_brightness = atmel_pwm_bl_get_intensity,
42537 .update_status = atmel_pwm_bl_set_intensity,
42538 };
42539diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42540index 5e20e6e..89025e6 100644
42541--- a/drivers/video/backlight/backlight.c
42542+++ b/drivers/video/backlight/backlight.c
42543@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42544 * ERR_PTR() or a pointer to the newly allocated device.
42545 */
42546 struct backlight_device *backlight_device_register(const char *name,
42547- struct device *parent, void *devdata, struct backlight_ops *ops)
42548+ struct device *parent, void *devdata, const struct backlight_ops *ops)
42549 {
42550 struct backlight_device *new_bd;
42551 int rc;
42552diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42553index 9677494..b4bcf80 100644
42554--- a/drivers/video/backlight/corgi_lcd.c
42555+++ b/drivers/video/backlight/corgi_lcd.c
42556@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42557 }
42558 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42559
42560-static struct backlight_ops corgi_bl_ops = {
42561+static const struct backlight_ops corgi_bl_ops = {
42562 .get_brightness = corgi_bl_get_intensity,
42563 .update_status = corgi_bl_update_status,
42564 };
42565diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42566index b9fe62b..2914bf1 100644
42567--- a/drivers/video/backlight/cr_bllcd.c
42568+++ b/drivers/video/backlight/cr_bllcd.c
42569@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42570 return intensity;
42571 }
42572
42573-static struct backlight_ops cr_backlight_ops = {
42574+static const struct backlight_ops cr_backlight_ops = {
42575 .get_brightness = cr_backlight_get_intensity,
42576 .update_status = cr_backlight_set_intensity,
42577 };
42578diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42579index 701a108..feacfd5 100644
42580--- a/drivers/video/backlight/da903x_bl.c
42581+++ b/drivers/video/backlight/da903x_bl.c
42582@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42583 return data->current_brightness;
42584 }
42585
42586-static struct backlight_ops da903x_backlight_ops = {
42587+static const struct backlight_ops da903x_backlight_ops = {
42588 .update_status = da903x_backlight_update_status,
42589 .get_brightness = da903x_backlight_get_brightness,
42590 };
42591diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42592index 6d27f62..e6d348e 100644
42593--- a/drivers/video/backlight/generic_bl.c
42594+++ b/drivers/video/backlight/generic_bl.c
42595@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42596 }
42597 EXPORT_SYMBOL(corgibl_limit_intensity);
42598
42599-static struct backlight_ops genericbl_ops = {
42600+static const struct backlight_ops genericbl_ops = {
42601 .options = BL_CORE_SUSPENDRESUME,
42602 .get_brightness = genericbl_get_intensity,
42603 .update_status = genericbl_send_intensity,
42604diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
42605index 7fb4eef..f7cc528 100644
42606--- a/drivers/video/backlight/hp680_bl.c
42607+++ b/drivers/video/backlight/hp680_bl.c
42608@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
42609 return current_intensity;
42610 }
42611
42612-static struct backlight_ops hp680bl_ops = {
42613+static const struct backlight_ops hp680bl_ops = {
42614 .get_brightness = hp680bl_get_intensity,
42615 .update_status = hp680bl_set_intensity,
42616 };
42617diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
42618index 7aed256..db9071f 100644
42619--- a/drivers/video/backlight/jornada720_bl.c
42620+++ b/drivers/video/backlight/jornada720_bl.c
42621@@ -93,7 +93,7 @@ out:
42622 return ret;
42623 }
42624
42625-static struct backlight_ops jornada_bl_ops = {
42626+static const struct backlight_ops jornada_bl_ops = {
42627 .get_brightness = jornada_bl_get_brightness,
42628 .update_status = jornada_bl_update_status,
42629 .options = BL_CORE_SUSPENDRESUME,
42630diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
42631index a38fda1..939e7b8 100644
42632--- a/drivers/video/backlight/kb3886_bl.c
42633+++ b/drivers/video/backlight/kb3886_bl.c
42634@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
42635 return kb3886bl_intensity;
42636 }
42637
42638-static struct backlight_ops kb3886bl_ops = {
42639+static const struct backlight_ops kb3886bl_ops = {
42640 .get_brightness = kb3886bl_get_intensity,
42641 .update_status = kb3886bl_send_intensity,
42642 };
42643diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
42644index 6b488b8..00a9591 100644
42645--- a/drivers/video/backlight/locomolcd.c
42646+++ b/drivers/video/backlight/locomolcd.c
42647@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
42648 return current_intensity;
42649 }
42650
42651-static struct backlight_ops locomobl_data = {
42652+static const struct backlight_ops locomobl_data = {
42653 .get_brightness = locomolcd_get_intensity,
42654 .update_status = locomolcd_set_intensity,
42655 };
42656diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
42657index 99bdfa8..3dac448 100644
42658--- a/drivers/video/backlight/mbp_nvidia_bl.c
42659+++ b/drivers/video/backlight/mbp_nvidia_bl.c
42660@@ -33,7 +33,7 @@ struct dmi_match_data {
42661 unsigned long iostart;
42662 unsigned long iolen;
42663 /* Backlight operations structure. */
42664- struct backlight_ops backlight_ops;
42665+ const struct backlight_ops backlight_ops;
42666 };
42667
42668 /* Module parameters. */
42669diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
42670index cbad67e..3cf900e 100644
42671--- a/drivers/video/backlight/omap1_bl.c
42672+++ b/drivers/video/backlight/omap1_bl.c
42673@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
42674 return bl->current_intensity;
42675 }
42676
42677-static struct backlight_ops omapbl_ops = {
42678+static const struct backlight_ops omapbl_ops = {
42679 .get_brightness = omapbl_get_intensity,
42680 .update_status = omapbl_update_status,
42681 };
42682diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
42683index 9edaf24..075786e 100644
42684--- a/drivers/video/backlight/progear_bl.c
42685+++ b/drivers/video/backlight/progear_bl.c
42686@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
42687 return intensity - HW_LEVEL_MIN;
42688 }
42689
42690-static struct backlight_ops progearbl_ops = {
42691+static const struct backlight_ops progearbl_ops = {
42692 .get_brightness = progearbl_get_intensity,
42693 .update_status = progearbl_set_intensity,
42694 };
42695diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
42696index 8871662..df9e0b3 100644
42697--- a/drivers/video/backlight/pwm_bl.c
42698+++ b/drivers/video/backlight/pwm_bl.c
42699@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
42700 return bl->props.brightness;
42701 }
42702
42703-static struct backlight_ops pwm_backlight_ops = {
42704+static const struct backlight_ops pwm_backlight_ops = {
42705 .update_status = pwm_backlight_update_status,
42706 .get_brightness = pwm_backlight_get_brightness,
42707 };
42708diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
42709index 43edbad..e14ce4d 100644
42710--- a/drivers/video/backlight/tosa_bl.c
42711+++ b/drivers/video/backlight/tosa_bl.c
42712@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
42713 return props->brightness;
42714 }
42715
42716-static struct backlight_ops bl_ops = {
42717+static const struct backlight_ops bl_ops = {
42718 .get_brightness = tosa_bl_get_brightness,
42719 .update_status = tosa_bl_update_status,
42720 };
42721diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
42722index 467bdb7..e32add3 100644
42723--- a/drivers/video/backlight/wm831x_bl.c
42724+++ b/drivers/video/backlight/wm831x_bl.c
42725@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
42726 return data->current_brightness;
42727 }
42728
42729-static struct backlight_ops wm831x_backlight_ops = {
42730+static const struct backlight_ops wm831x_backlight_ops = {
42731 .options = BL_CORE_SUSPENDRESUME,
42732 .update_status = wm831x_backlight_update_status,
42733 .get_brightness = wm831x_backlight_get_brightness,
42734diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
42735index e49ae5e..db4e6f7 100644
42736--- a/drivers/video/bf54x-lq043fb.c
42737+++ b/drivers/video/bf54x-lq043fb.c
42738@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42739 return 0;
42740 }
42741
42742-static struct backlight_ops bfin_lq043fb_bl_ops = {
42743+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42744 .get_brightness = bl_get_brightness,
42745 };
42746
42747diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
42748index 2c72a7c..d523e52 100644
42749--- a/drivers/video/bfin-t350mcqb-fb.c
42750+++ b/drivers/video/bfin-t350mcqb-fb.c
42751@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42752 return 0;
42753 }
42754
42755-static struct backlight_ops bfin_lq043fb_bl_ops = {
42756+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42757 .get_brightness = bl_get_brightness,
42758 };
42759
42760diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
42761index f53b9f1..958bf4e 100644
42762--- a/drivers/video/fbcmap.c
42763+++ b/drivers/video/fbcmap.c
42764@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
42765 rc = -ENODEV;
42766 goto out;
42767 }
42768- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
42769- !info->fbops->fb_setcmap)) {
42770+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
42771 rc = -EINVAL;
42772 goto out1;
42773 }
42774diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
42775index 99bbd28..ad3829e 100644
42776--- a/drivers/video/fbmem.c
42777+++ b/drivers/video/fbmem.c
42778@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42779 image->dx += image->width + 8;
42780 }
42781 } else if (rotate == FB_ROTATE_UD) {
42782- for (x = 0; x < num && image->dx >= 0; x++) {
42783+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
42784 info->fbops->fb_imageblit(info, image);
42785 image->dx -= image->width + 8;
42786 }
42787@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42788 image->dy += image->height + 8;
42789 }
42790 } else if (rotate == FB_ROTATE_CCW) {
42791- for (x = 0; x < num && image->dy >= 0; x++) {
42792+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
42793 info->fbops->fb_imageblit(info, image);
42794 image->dy -= image->height + 8;
42795 }
42796@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
42797 int flags = info->flags;
42798 int ret = 0;
42799
42800+ pax_track_stack();
42801+
42802 if (var->activate & FB_ACTIVATE_INV_MODE) {
42803 struct fb_videomode mode1, mode2;
42804
42805@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42806 void __user *argp = (void __user *)arg;
42807 long ret = 0;
42808
42809+ pax_track_stack();
42810+
42811 switch (cmd) {
42812 case FBIOGET_VSCREENINFO:
42813 if (!lock_fb_info(info))
42814@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42815 return -EFAULT;
42816 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
42817 return -EINVAL;
42818- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
42819+ if (con2fb.framebuffer >= FB_MAX)
42820 return -EINVAL;
42821 if (!registered_fb[con2fb.framebuffer])
42822 request_module("fb%d", con2fb.framebuffer);
42823diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
42824index f20eff8..3e4f622 100644
42825--- a/drivers/video/geode/gx1fb_core.c
42826+++ b/drivers/video/geode/gx1fb_core.c
42827@@ -30,7 +30,7 @@ static int crt_option = 1;
42828 static char panel_option[32] = "";
42829
42830 /* Modes relevant to the GX1 (taken from modedb.c) */
42831-static const struct fb_videomode __initdata gx1_modedb[] = {
42832+static const struct fb_videomode __initconst gx1_modedb[] = {
42833 /* 640x480-60 VESA */
42834 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
42835 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
42836diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
42837index 896e53d..4d87d0b 100644
42838--- a/drivers/video/gxt4500.c
42839+++ b/drivers/video/gxt4500.c
42840@@ -156,7 +156,7 @@ struct gxt4500_par {
42841 static char *mode_option;
42842
42843 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
42844-static const struct fb_videomode defaultmode __devinitdata = {
42845+static const struct fb_videomode defaultmode __devinitconst = {
42846 .refresh = 60,
42847 .xres = 1280,
42848 .yres = 1024,
42849@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
42850 return 0;
42851 }
42852
42853-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
42854+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
42855 .id = "IBM GXT4500P",
42856 .type = FB_TYPE_PACKED_PIXELS,
42857 .visual = FB_VISUAL_PSEUDOCOLOR,
42858diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
42859index f5bedee..28c6028 100644
42860--- a/drivers/video/i810/i810_accel.c
42861+++ b/drivers/video/i810/i810_accel.c
42862@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
42863 }
42864 }
42865 printk("ringbuffer lockup!!!\n");
42866+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
42867 i810_report_error(mmio);
42868 par->dev_flags |= LOCKUP;
42869 info->pixmap.scan_align = 1;
42870diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
42871index 5743ea2..457f82c 100644
42872--- a/drivers/video/i810/i810_main.c
42873+++ b/drivers/video/i810/i810_main.c
42874@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
42875 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
42876
42877 /* PCI */
42878-static const char *i810_pci_list[] __devinitdata = {
42879+static const char *i810_pci_list[] __devinitconst = {
42880 "Intel(R) 810 Framebuffer Device" ,
42881 "Intel(R) 810-DC100 Framebuffer Device" ,
42882 "Intel(R) 810E Framebuffer Device" ,
42883diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
42884index 3c14e43..eafa544 100644
42885--- a/drivers/video/logo/logo_linux_clut224.ppm
42886+++ b/drivers/video/logo/logo_linux_clut224.ppm
42887@@ -1,1604 +1,1123 @@
42888 P3
42889-# Standard 224-color Linux logo
42890 80 80
42891 255
42892- 0 0 0 0 0 0 0 0 0 0 0 0
42893- 0 0 0 0 0 0 0 0 0 0 0 0
42894- 0 0 0 0 0 0 0 0 0 0 0 0
42895- 0 0 0 0 0 0 0 0 0 0 0 0
42896- 0 0 0 0 0 0 0 0 0 0 0 0
42897- 0 0 0 0 0 0 0 0 0 0 0 0
42898- 0 0 0 0 0 0 0 0 0 0 0 0
42899- 0 0 0 0 0 0 0 0 0 0 0 0
42900- 0 0 0 0 0 0 0 0 0 0 0 0
42901- 6 6 6 6 6 6 10 10 10 10 10 10
42902- 10 10 10 6 6 6 6 6 6 6 6 6
42903- 0 0 0 0 0 0 0 0 0 0 0 0
42904- 0 0 0 0 0 0 0 0 0 0 0 0
42905- 0 0 0 0 0 0 0 0 0 0 0 0
42906- 0 0 0 0 0 0 0 0 0 0 0 0
42907- 0 0 0 0 0 0 0 0 0 0 0 0
42908- 0 0 0 0 0 0 0 0 0 0 0 0
42909- 0 0 0 0 0 0 0 0 0 0 0 0
42910- 0 0 0 0 0 0 0 0 0 0 0 0
42911- 0 0 0 0 0 0 0 0 0 0 0 0
42912- 0 0 0 0 0 0 0 0 0 0 0 0
42913- 0 0 0 0 0 0 0 0 0 0 0 0
42914- 0 0 0 0 0 0 0 0 0 0 0 0
42915- 0 0 0 0 0 0 0 0 0 0 0 0
42916- 0 0 0 0 0 0 0 0 0 0 0 0
42917- 0 0 0 0 0 0 0 0 0 0 0 0
42918- 0 0 0 0 0 0 0 0 0 0 0 0
42919- 0 0 0 0 0 0 0 0 0 0 0 0
42920- 0 0 0 6 6 6 10 10 10 14 14 14
42921- 22 22 22 26 26 26 30 30 30 34 34 34
42922- 30 30 30 30 30 30 26 26 26 18 18 18
42923- 14 14 14 10 10 10 6 6 6 0 0 0
42924- 0 0 0 0 0 0 0 0 0 0 0 0
42925- 0 0 0 0 0 0 0 0 0 0 0 0
42926- 0 0 0 0 0 0 0 0 0 0 0 0
42927- 0 0 0 0 0 0 0 0 0 0 0 0
42928- 0 0 0 0 0 0 0 0 0 0 0 0
42929- 0 0 0 0 0 0 0 0 0 0 0 0
42930- 0 0 0 0 0 0 0 0 0 0 0 0
42931- 0 0 0 0 0 0 0 0 0 0 0 0
42932- 0 0 0 0 0 0 0 0 0 0 0 0
42933- 0 0 0 0 0 1 0 0 1 0 0 0
42934- 0 0 0 0 0 0 0 0 0 0 0 0
42935- 0 0 0 0 0 0 0 0 0 0 0 0
42936- 0 0 0 0 0 0 0 0 0 0 0 0
42937- 0 0 0 0 0 0 0 0 0 0 0 0
42938- 0 0 0 0 0 0 0 0 0 0 0 0
42939- 0 0 0 0 0 0 0 0 0 0 0 0
42940- 6 6 6 14 14 14 26 26 26 42 42 42
42941- 54 54 54 66 66 66 78 78 78 78 78 78
42942- 78 78 78 74 74 74 66 66 66 54 54 54
42943- 42 42 42 26 26 26 18 18 18 10 10 10
42944- 6 6 6 0 0 0 0 0 0 0 0 0
42945- 0 0 0 0 0 0 0 0 0 0 0 0
42946- 0 0 0 0 0 0 0 0 0 0 0 0
42947- 0 0 0 0 0 0 0 0 0 0 0 0
42948- 0 0 0 0 0 0 0 0 0 0 0 0
42949- 0 0 0 0 0 0 0 0 0 0 0 0
42950- 0 0 0 0 0 0 0 0 0 0 0 0
42951- 0 0 0 0 0 0 0 0 0 0 0 0
42952- 0 0 0 0 0 0 0 0 0 0 0 0
42953- 0 0 1 0 0 0 0 0 0 0 0 0
42954- 0 0 0 0 0 0 0 0 0 0 0 0
42955- 0 0 0 0 0 0 0 0 0 0 0 0
42956- 0 0 0 0 0 0 0 0 0 0 0 0
42957- 0 0 0 0 0 0 0 0 0 0 0 0
42958- 0 0 0 0 0 0 0 0 0 0 0 0
42959- 0 0 0 0 0 0 0 0 0 10 10 10
42960- 22 22 22 42 42 42 66 66 66 86 86 86
42961- 66 66 66 38 38 38 38 38 38 22 22 22
42962- 26 26 26 34 34 34 54 54 54 66 66 66
42963- 86 86 86 70 70 70 46 46 46 26 26 26
42964- 14 14 14 6 6 6 0 0 0 0 0 0
42965- 0 0 0 0 0 0 0 0 0 0 0 0
42966- 0 0 0 0 0 0 0 0 0 0 0 0
42967- 0 0 0 0 0 0 0 0 0 0 0 0
42968- 0 0 0 0 0 0 0 0 0 0 0 0
42969- 0 0 0 0 0 0 0 0 0 0 0 0
42970- 0 0 0 0 0 0 0 0 0 0 0 0
42971- 0 0 0 0 0 0 0 0 0 0 0 0
42972- 0 0 0 0 0 0 0 0 0 0 0 0
42973- 0 0 1 0 0 1 0 0 1 0 0 0
42974- 0 0 0 0 0 0 0 0 0 0 0 0
42975- 0 0 0 0 0 0 0 0 0 0 0 0
42976- 0 0 0 0 0 0 0 0 0 0 0 0
42977- 0 0 0 0 0 0 0 0 0 0 0 0
42978- 0 0 0 0 0 0 0 0 0 0 0 0
42979- 0 0 0 0 0 0 10 10 10 26 26 26
42980- 50 50 50 82 82 82 58 58 58 6 6 6
42981- 2 2 6 2 2 6 2 2 6 2 2 6
42982- 2 2 6 2 2 6 2 2 6 2 2 6
42983- 6 6 6 54 54 54 86 86 86 66 66 66
42984- 38 38 38 18 18 18 6 6 6 0 0 0
42985- 0 0 0 0 0 0 0 0 0 0 0 0
42986- 0 0 0 0 0 0 0 0 0 0 0 0
42987- 0 0 0 0 0 0 0 0 0 0 0 0
42988- 0 0 0 0 0 0 0 0 0 0 0 0
42989- 0 0 0 0 0 0 0 0 0 0 0 0
42990- 0 0 0 0 0 0 0 0 0 0 0 0
42991- 0 0 0 0 0 0 0 0 0 0 0 0
42992- 0 0 0 0 0 0 0 0 0 0 0 0
42993- 0 0 0 0 0 0 0 0 0 0 0 0
42994- 0 0 0 0 0 0 0 0 0 0 0 0
42995- 0 0 0 0 0 0 0 0 0 0 0 0
42996- 0 0 0 0 0 0 0 0 0 0 0 0
42997- 0 0 0 0 0 0 0 0 0 0 0 0
42998- 0 0 0 0 0 0 0 0 0 0 0 0
42999- 0 0 0 6 6 6 22 22 22 50 50 50
43000- 78 78 78 34 34 34 2 2 6 2 2 6
43001- 2 2 6 2 2 6 2 2 6 2 2 6
43002- 2 2 6 2 2 6 2 2 6 2 2 6
43003- 2 2 6 2 2 6 6 6 6 70 70 70
43004- 78 78 78 46 46 46 22 22 22 6 6 6
43005- 0 0 0 0 0 0 0 0 0 0 0 0
43006- 0 0 0 0 0 0 0 0 0 0 0 0
43007- 0 0 0 0 0 0 0 0 0 0 0 0
43008- 0 0 0 0 0 0 0 0 0 0 0 0
43009- 0 0 0 0 0 0 0 0 0 0 0 0
43010- 0 0 0 0 0 0 0 0 0 0 0 0
43011- 0 0 0 0 0 0 0 0 0 0 0 0
43012- 0 0 0 0 0 0 0 0 0 0 0 0
43013- 0 0 1 0 0 1 0 0 1 0 0 0
43014- 0 0 0 0 0 0 0 0 0 0 0 0
43015- 0 0 0 0 0 0 0 0 0 0 0 0
43016- 0 0 0 0 0 0 0 0 0 0 0 0
43017- 0 0 0 0 0 0 0 0 0 0 0 0
43018- 0 0 0 0 0 0 0 0 0 0 0 0
43019- 6 6 6 18 18 18 42 42 42 82 82 82
43020- 26 26 26 2 2 6 2 2 6 2 2 6
43021- 2 2 6 2 2 6 2 2 6 2 2 6
43022- 2 2 6 2 2 6 2 2 6 14 14 14
43023- 46 46 46 34 34 34 6 6 6 2 2 6
43024- 42 42 42 78 78 78 42 42 42 18 18 18
43025- 6 6 6 0 0 0 0 0 0 0 0 0
43026- 0 0 0 0 0 0 0 0 0 0 0 0
43027- 0 0 0 0 0 0 0 0 0 0 0 0
43028- 0 0 0 0 0 0 0 0 0 0 0 0
43029- 0 0 0 0 0 0 0 0 0 0 0 0
43030- 0 0 0 0 0 0 0 0 0 0 0 0
43031- 0 0 0 0 0 0 0 0 0 0 0 0
43032- 0 0 0 0 0 0 0 0 0 0 0 0
43033- 0 0 1 0 0 0 0 0 1 0 0 0
43034- 0 0 0 0 0 0 0 0 0 0 0 0
43035- 0 0 0 0 0 0 0 0 0 0 0 0
43036- 0 0 0 0 0 0 0 0 0 0 0 0
43037- 0 0 0 0 0 0 0 0 0 0 0 0
43038- 0 0 0 0 0 0 0 0 0 0 0 0
43039- 10 10 10 30 30 30 66 66 66 58 58 58
43040- 2 2 6 2 2 6 2 2 6 2 2 6
43041- 2 2 6 2 2 6 2 2 6 2 2 6
43042- 2 2 6 2 2 6 2 2 6 26 26 26
43043- 86 86 86 101 101 101 46 46 46 10 10 10
43044- 2 2 6 58 58 58 70 70 70 34 34 34
43045- 10 10 10 0 0 0 0 0 0 0 0 0
43046- 0 0 0 0 0 0 0 0 0 0 0 0
43047- 0 0 0 0 0 0 0 0 0 0 0 0
43048- 0 0 0 0 0 0 0 0 0 0 0 0
43049- 0 0 0 0 0 0 0 0 0 0 0 0
43050- 0 0 0 0 0 0 0 0 0 0 0 0
43051- 0 0 0 0 0 0 0 0 0 0 0 0
43052- 0 0 0 0 0 0 0 0 0 0 0 0
43053- 0 0 1 0 0 1 0 0 1 0 0 0
43054- 0 0 0 0 0 0 0 0 0 0 0 0
43055- 0 0 0 0 0 0 0 0 0 0 0 0
43056- 0 0 0 0 0 0 0 0 0 0 0 0
43057- 0 0 0 0 0 0 0 0 0 0 0 0
43058- 0 0 0 0 0 0 0 0 0 0 0 0
43059- 14 14 14 42 42 42 86 86 86 10 10 10
43060- 2 2 6 2 2 6 2 2 6 2 2 6
43061- 2 2 6 2 2 6 2 2 6 2 2 6
43062- 2 2 6 2 2 6 2 2 6 30 30 30
43063- 94 94 94 94 94 94 58 58 58 26 26 26
43064- 2 2 6 6 6 6 78 78 78 54 54 54
43065- 22 22 22 6 6 6 0 0 0 0 0 0
43066- 0 0 0 0 0 0 0 0 0 0 0 0
43067- 0 0 0 0 0 0 0 0 0 0 0 0
43068- 0 0 0 0 0 0 0 0 0 0 0 0
43069- 0 0 0 0 0 0 0 0 0 0 0 0
43070- 0 0 0 0 0 0 0 0 0 0 0 0
43071- 0 0 0 0 0 0 0 0 0 0 0 0
43072- 0 0 0 0 0 0 0 0 0 0 0 0
43073- 0 0 0 0 0 0 0 0 0 0 0 0
43074- 0 0 0 0 0 0 0 0 0 0 0 0
43075- 0 0 0 0 0 0 0 0 0 0 0 0
43076- 0 0 0 0 0 0 0 0 0 0 0 0
43077- 0 0 0 0 0 0 0 0 0 0 0 0
43078- 0 0 0 0 0 0 0 0 0 6 6 6
43079- 22 22 22 62 62 62 62 62 62 2 2 6
43080- 2 2 6 2 2 6 2 2 6 2 2 6
43081- 2 2 6 2 2 6 2 2 6 2 2 6
43082- 2 2 6 2 2 6 2 2 6 26 26 26
43083- 54 54 54 38 38 38 18 18 18 10 10 10
43084- 2 2 6 2 2 6 34 34 34 82 82 82
43085- 38 38 38 14 14 14 0 0 0 0 0 0
43086- 0 0 0 0 0 0 0 0 0 0 0 0
43087- 0 0 0 0 0 0 0 0 0 0 0 0
43088- 0 0 0 0 0 0 0 0 0 0 0 0
43089- 0 0 0 0 0 0 0 0 0 0 0 0
43090- 0 0 0 0 0 0 0 0 0 0 0 0
43091- 0 0 0 0 0 0 0 0 0 0 0 0
43092- 0 0 0 0 0 0 0 0 0 0 0 0
43093- 0 0 0 0 0 1 0 0 1 0 0 0
43094- 0 0 0 0 0 0 0 0 0 0 0 0
43095- 0 0 0 0 0 0 0 0 0 0 0 0
43096- 0 0 0 0 0 0 0 0 0 0 0 0
43097- 0 0 0 0 0 0 0 0 0 0 0 0
43098- 0 0 0 0 0 0 0 0 0 6 6 6
43099- 30 30 30 78 78 78 30 30 30 2 2 6
43100- 2 2 6 2 2 6 2 2 6 2 2 6
43101- 2 2 6 2 2 6 2 2 6 2 2 6
43102- 2 2 6 2 2 6 2 2 6 10 10 10
43103- 10 10 10 2 2 6 2 2 6 2 2 6
43104- 2 2 6 2 2 6 2 2 6 78 78 78
43105- 50 50 50 18 18 18 6 6 6 0 0 0
43106- 0 0 0 0 0 0 0 0 0 0 0 0
43107- 0 0 0 0 0 0 0 0 0 0 0 0
43108- 0 0 0 0 0 0 0 0 0 0 0 0
43109- 0 0 0 0 0 0 0 0 0 0 0 0
43110- 0 0 0 0 0 0 0 0 0 0 0 0
43111- 0 0 0 0 0 0 0 0 0 0 0 0
43112- 0 0 0 0 0 0 0 0 0 0 0 0
43113- 0 0 1 0 0 0 0 0 0 0 0 0
43114- 0 0 0 0 0 0 0 0 0 0 0 0
43115- 0 0 0 0 0 0 0 0 0 0 0 0
43116- 0 0 0 0 0 0 0 0 0 0 0 0
43117- 0 0 0 0 0 0 0 0 0 0 0 0
43118- 0 0 0 0 0 0 0 0 0 10 10 10
43119- 38 38 38 86 86 86 14 14 14 2 2 6
43120- 2 2 6 2 2 6 2 2 6 2 2 6
43121- 2 2 6 2 2 6 2 2 6 2 2 6
43122- 2 2 6 2 2 6 2 2 6 2 2 6
43123- 2 2 6 2 2 6 2 2 6 2 2 6
43124- 2 2 6 2 2 6 2 2 6 54 54 54
43125- 66 66 66 26 26 26 6 6 6 0 0 0
43126- 0 0 0 0 0 0 0 0 0 0 0 0
43127- 0 0 0 0 0 0 0 0 0 0 0 0
43128- 0 0 0 0 0 0 0 0 0 0 0 0
43129- 0 0 0 0 0 0 0 0 0 0 0 0
43130- 0 0 0 0 0 0 0 0 0 0 0 0
43131- 0 0 0 0 0 0 0 0 0 0 0 0
43132- 0 0 0 0 0 0 0 0 0 0 0 0
43133- 0 0 0 0 0 1 0 0 1 0 0 0
43134- 0 0 0 0 0 0 0 0 0 0 0 0
43135- 0 0 0 0 0 0 0 0 0 0 0 0
43136- 0 0 0 0 0 0 0 0 0 0 0 0
43137- 0 0 0 0 0 0 0 0 0 0 0 0
43138- 0 0 0 0 0 0 0 0 0 14 14 14
43139- 42 42 42 82 82 82 2 2 6 2 2 6
43140- 2 2 6 6 6 6 10 10 10 2 2 6
43141- 2 2 6 2 2 6 2 2 6 2 2 6
43142- 2 2 6 2 2 6 2 2 6 6 6 6
43143- 14 14 14 10 10 10 2 2 6 2 2 6
43144- 2 2 6 2 2 6 2 2 6 18 18 18
43145- 82 82 82 34 34 34 10 10 10 0 0 0
43146- 0 0 0 0 0 0 0 0 0 0 0 0
43147- 0 0 0 0 0 0 0 0 0 0 0 0
43148- 0 0 0 0 0 0 0 0 0 0 0 0
43149- 0 0 0 0 0 0 0 0 0 0 0 0
43150- 0 0 0 0 0 0 0 0 0 0 0 0
43151- 0 0 0 0 0 0 0 0 0 0 0 0
43152- 0 0 0 0 0 0 0 0 0 0 0 0
43153- 0 0 1 0 0 0 0 0 0 0 0 0
43154- 0 0 0 0 0 0 0 0 0 0 0 0
43155- 0 0 0 0 0 0 0 0 0 0 0 0
43156- 0 0 0 0 0 0 0 0 0 0 0 0
43157- 0 0 0 0 0 0 0 0 0 0 0 0
43158- 0 0 0 0 0 0 0 0 0 14 14 14
43159- 46 46 46 86 86 86 2 2 6 2 2 6
43160- 6 6 6 6 6 6 22 22 22 34 34 34
43161- 6 6 6 2 2 6 2 2 6 2 2 6
43162- 2 2 6 2 2 6 18 18 18 34 34 34
43163- 10 10 10 50 50 50 22 22 22 2 2 6
43164- 2 2 6 2 2 6 2 2 6 10 10 10
43165- 86 86 86 42 42 42 14 14 14 0 0 0
43166- 0 0 0 0 0 0 0 0 0 0 0 0
43167- 0 0 0 0 0 0 0 0 0 0 0 0
43168- 0 0 0 0 0 0 0 0 0 0 0 0
43169- 0 0 0 0 0 0 0 0 0 0 0 0
43170- 0 0 0 0 0 0 0 0 0 0 0 0
43171- 0 0 0 0 0 0 0 0 0 0 0 0
43172- 0 0 0 0 0 0 0 0 0 0 0 0
43173- 0 0 1 0 0 1 0 0 1 0 0 0
43174- 0 0 0 0 0 0 0 0 0 0 0 0
43175- 0 0 0 0 0 0 0 0 0 0 0 0
43176- 0 0 0 0 0 0 0 0 0 0 0 0
43177- 0 0 0 0 0 0 0 0 0 0 0 0
43178- 0 0 0 0 0 0 0 0 0 14 14 14
43179- 46 46 46 86 86 86 2 2 6 2 2 6
43180- 38 38 38 116 116 116 94 94 94 22 22 22
43181- 22 22 22 2 2 6 2 2 6 2 2 6
43182- 14 14 14 86 86 86 138 138 138 162 162 162
43183-154 154 154 38 38 38 26 26 26 6 6 6
43184- 2 2 6 2 2 6 2 2 6 2 2 6
43185- 86 86 86 46 46 46 14 14 14 0 0 0
43186- 0 0 0 0 0 0 0 0 0 0 0 0
43187- 0 0 0 0 0 0 0 0 0 0 0 0
43188- 0 0 0 0 0 0 0 0 0 0 0 0
43189- 0 0 0 0 0 0 0 0 0 0 0 0
43190- 0 0 0 0 0 0 0 0 0 0 0 0
43191- 0 0 0 0 0 0 0 0 0 0 0 0
43192- 0 0 0 0 0 0 0 0 0 0 0 0
43193- 0 0 0 0 0 0 0 0 0 0 0 0
43194- 0 0 0 0 0 0 0 0 0 0 0 0
43195- 0 0 0 0 0 0 0 0 0 0 0 0
43196- 0 0 0 0 0 0 0 0 0 0 0 0
43197- 0 0 0 0 0 0 0 0 0 0 0 0
43198- 0 0 0 0 0 0 0 0 0 14 14 14
43199- 46 46 46 86 86 86 2 2 6 14 14 14
43200-134 134 134 198 198 198 195 195 195 116 116 116
43201- 10 10 10 2 2 6 2 2 6 6 6 6
43202-101 98 89 187 187 187 210 210 210 218 218 218
43203-214 214 214 134 134 134 14 14 14 6 6 6
43204- 2 2 6 2 2 6 2 2 6 2 2 6
43205- 86 86 86 50 50 50 18 18 18 6 6 6
43206- 0 0 0 0 0 0 0 0 0 0 0 0
43207- 0 0 0 0 0 0 0 0 0 0 0 0
43208- 0 0 0 0 0 0 0 0 0 0 0 0
43209- 0 0 0 0 0 0 0 0 0 0 0 0
43210- 0 0 0 0 0 0 0 0 0 0 0 0
43211- 0 0 0 0 0 0 0 0 0 0 0 0
43212- 0 0 0 0 0 0 0 0 1 0 0 0
43213- 0 0 1 0 0 1 0 0 1 0 0 0
43214- 0 0 0 0 0 0 0 0 0 0 0 0
43215- 0 0 0 0 0 0 0 0 0 0 0 0
43216- 0 0 0 0 0 0 0 0 0 0 0 0
43217- 0 0 0 0 0 0 0 0 0 0 0 0
43218- 0 0 0 0 0 0 0 0 0 14 14 14
43219- 46 46 46 86 86 86 2 2 6 54 54 54
43220-218 218 218 195 195 195 226 226 226 246 246 246
43221- 58 58 58 2 2 6 2 2 6 30 30 30
43222-210 210 210 253 253 253 174 174 174 123 123 123
43223-221 221 221 234 234 234 74 74 74 2 2 6
43224- 2 2 6 2 2 6 2 2 6 2 2 6
43225- 70 70 70 58 58 58 22 22 22 6 6 6
43226- 0 0 0 0 0 0 0 0 0 0 0 0
43227- 0 0 0 0 0 0 0 0 0 0 0 0
43228- 0 0 0 0 0 0 0 0 0 0 0 0
43229- 0 0 0 0 0 0 0 0 0 0 0 0
43230- 0 0 0 0 0 0 0 0 0 0 0 0
43231- 0 0 0 0 0 0 0 0 0 0 0 0
43232- 0 0 0 0 0 0 0 0 0 0 0 0
43233- 0 0 0 0 0 0 0 0 0 0 0 0
43234- 0 0 0 0 0 0 0 0 0 0 0 0
43235- 0 0 0 0 0 0 0 0 0 0 0 0
43236- 0 0 0 0 0 0 0 0 0 0 0 0
43237- 0 0 0 0 0 0 0 0 0 0 0 0
43238- 0 0 0 0 0 0 0 0 0 14 14 14
43239- 46 46 46 82 82 82 2 2 6 106 106 106
43240-170 170 170 26 26 26 86 86 86 226 226 226
43241-123 123 123 10 10 10 14 14 14 46 46 46
43242-231 231 231 190 190 190 6 6 6 70 70 70
43243- 90 90 90 238 238 238 158 158 158 2 2 6
43244- 2 2 6 2 2 6 2 2 6 2 2 6
43245- 70 70 70 58 58 58 22 22 22 6 6 6
43246- 0 0 0 0 0 0 0 0 0 0 0 0
43247- 0 0 0 0 0 0 0 0 0 0 0 0
43248- 0 0 0 0 0 0 0 0 0 0 0 0
43249- 0 0 0 0 0 0 0 0 0 0 0 0
43250- 0 0 0 0 0 0 0 0 0 0 0 0
43251- 0 0 0 0 0 0 0 0 0 0 0 0
43252- 0 0 0 0 0 0 0 0 1 0 0 0
43253- 0 0 1 0 0 1 0 0 1 0 0 0
43254- 0 0 0 0 0 0 0 0 0 0 0 0
43255- 0 0 0 0 0 0 0 0 0 0 0 0
43256- 0 0 0 0 0 0 0 0 0 0 0 0
43257- 0 0 0 0 0 0 0 0 0 0 0 0
43258- 0 0 0 0 0 0 0 0 0 14 14 14
43259- 42 42 42 86 86 86 6 6 6 116 116 116
43260-106 106 106 6 6 6 70 70 70 149 149 149
43261-128 128 128 18 18 18 38 38 38 54 54 54
43262-221 221 221 106 106 106 2 2 6 14 14 14
43263- 46 46 46 190 190 190 198 198 198 2 2 6
43264- 2 2 6 2 2 6 2 2 6 2 2 6
43265- 74 74 74 62 62 62 22 22 22 6 6 6
43266- 0 0 0 0 0 0 0 0 0 0 0 0
43267- 0 0 0 0 0 0 0 0 0 0 0 0
43268- 0 0 0 0 0 0 0 0 0 0 0 0
43269- 0 0 0 0 0 0 0 0 0 0 0 0
43270- 0 0 0 0 0 0 0 0 0 0 0 0
43271- 0 0 0 0 0 0 0 0 0 0 0 0
43272- 0 0 0 0 0 0 0 0 1 0 0 0
43273- 0 0 1 0 0 0 0 0 1 0 0 0
43274- 0 0 0 0 0 0 0 0 0 0 0 0
43275- 0 0 0 0 0 0 0 0 0 0 0 0
43276- 0 0 0 0 0 0 0 0 0 0 0 0
43277- 0 0 0 0 0 0 0 0 0 0 0 0
43278- 0 0 0 0 0 0 0 0 0 14 14 14
43279- 42 42 42 94 94 94 14 14 14 101 101 101
43280-128 128 128 2 2 6 18 18 18 116 116 116
43281-118 98 46 121 92 8 121 92 8 98 78 10
43282-162 162 162 106 106 106 2 2 6 2 2 6
43283- 2 2 6 195 195 195 195 195 195 6 6 6
43284- 2 2 6 2 2 6 2 2 6 2 2 6
43285- 74 74 74 62 62 62 22 22 22 6 6 6
43286- 0 0 0 0 0 0 0 0 0 0 0 0
43287- 0 0 0 0 0 0 0 0 0 0 0 0
43288- 0 0 0 0 0 0 0 0 0 0 0 0
43289- 0 0 0 0 0 0 0 0 0 0 0 0
43290- 0 0 0 0 0 0 0 0 0 0 0 0
43291- 0 0 0 0 0 0 0 0 0 0 0 0
43292- 0 0 0 0 0 0 0 0 1 0 0 1
43293- 0 0 1 0 0 0 0 0 1 0 0 0
43294- 0 0 0 0 0 0 0 0 0 0 0 0
43295- 0 0 0 0 0 0 0 0 0 0 0 0
43296- 0 0 0 0 0 0 0 0 0 0 0 0
43297- 0 0 0 0 0 0 0 0 0 0 0 0
43298- 0 0 0 0 0 0 0 0 0 10 10 10
43299- 38 38 38 90 90 90 14 14 14 58 58 58
43300-210 210 210 26 26 26 54 38 6 154 114 10
43301-226 170 11 236 186 11 225 175 15 184 144 12
43302-215 174 15 175 146 61 37 26 9 2 2 6
43303- 70 70 70 246 246 246 138 138 138 2 2 6
43304- 2 2 6 2 2 6 2 2 6 2 2 6
43305- 70 70 70 66 66 66 26 26 26 6 6 6
43306- 0 0 0 0 0 0 0 0 0 0 0 0
43307- 0 0 0 0 0 0 0 0 0 0 0 0
43308- 0 0 0 0 0 0 0 0 0 0 0 0
43309- 0 0 0 0 0 0 0 0 0 0 0 0
43310- 0 0 0 0 0 0 0 0 0 0 0 0
43311- 0 0 0 0 0 0 0 0 0 0 0 0
43312- 0 0 0 0 0 0 0 0 0 0 0 0
43313- 0 0 0 0 0 0 0 0 0 0 0 0
43314- 0 0 0 0 0 0 0 0 0 0 0 0
43315- 0 0 0 0 0 0 0 0 0 0 0 0
43316- 0 0 0 0 0 0 0 0 0 0 0 0
43317- 0 0 0 0 0 0 0 0 0 0 0 0
43318- 0 0 0 0 0 0 0 0 0 10 10 10
43319- 38 38 38 86 86 86 14 14 14 10 10 10
43320-195 195 195 188 164 115 192 133 9 225 175 15
43321-239 182 13 234 190 10 232 195 16 232 200 30
43322-245 207 45 241 208 19 232 195 16 184 144 12
43323-218 194 134 211 206 186 42 42 42 2 2 6
43324- 2 2 6 2 2 6 2 2 6 2 2 6
43325- 50 50 50 74 74 74 30 30 30 6 6 6
43326- 0 0 0 0 0 0 0 0 0 0 0 0
43327- 0 0 0 0 0 0 0 0 0 0 0 0
43328- 0 0 0 0 0 0 0 0 0 0 0 0
43329- 0 0 0 0 0 0 0 0 0 0 0 0
43330- 0 0 0 0 0 0 0 0 0 0 0 0
43331- 0 0 0 0 0 0 0 0 0 0 0 0
43332- 0 0 0 0 0 0 0 0 0 0 0 0
43333- 0 0 0 0 0 0 0 0 0 0 0 0
43334- 0 0 0 0 0 0 0 0 0 0 0 0
43335- 0 0 0 0 0 0 0 0 0 0 0 0
43336- 0 0 0 0 0 0 0 0 0 0 0 0
43337- 0 0 0 0 0 0 0 0 0 0 0 0
43338- 0 0 0 0 0 0 0 0 0 10 10 10
43339- 34 34 34 86 86 86 14 14 14 2 2 6
43340-121 87 25 192 133 9 219 162 10 239 182 13
43341-236 186 11 232 195 16 241 208 19 244 214 54
43342-246 218 60 246 218 38 246 215 20 241 208 19
43343-241 208 19 226 184 13 121 87 25 2 2 6
43344- 2 2 6 2 2 6 2 2 6 2 2 6
43345- 50 50 50 82 82 82 34 34 34 10 10 10
43346- 0 0 0 0 0 0 0 0 0 0 0 0
43347- 0 0 0 0 0 0 0 0 0 0 0 0
43348- 0 0 0 0 0 0 0 0 0 0 0 0
43349- 0 0 0 0 0 0 0 0 0 0 0 0
43350- 0 0 0 0 0 0 0 0 0 0 0 0
43351- 0 0 0 0 0 0 0 0 0 0 0 0
43352- 0 0 0 0 0 0 0 0 0 0 0 0
43353- 0 0 0 0 0 0 0 0 0 0 0 0
43354- 0 0 0 0 0 0 0 0 0 0 0 0
43355- 0 0 0 0 0 0 0 0 0 0 0 0
43356- 0 0 0 0 0 0 0 0 0 0 0 0
43357- 0 0 0 0 0 0 0 0 0 0 0 0
43358- 0 0 0 0 0 0 0 0 0 10 10 10
43359- 34 34 34 82 82 82 30 30 30 61 42 6
43360-180 123 7 206 145 10 230 174 11 239 182 13
43361-234 190 10 238 202 15 241 208 19 246 218 74
43362-246 218 38 246 215 20 246 215 20 246 215 20
43363-226 184 13 215 174 15 184 144 12 6 6 6
43364- 2 2 6 2 2 6 2 2 6 2 2 6
43365- 26 26 26 94 94 94 42 42 42 14 14 14
43366- 0 0 0 0 0 0 0 0 0 0 0 0
43367- 0 0 0 0 0 0 0 0 0 0 0 0
43368- 0 0 0 0 0 0 0 0 0 0 0 0
43369- 0 0 0 0 0 0 0 0 0 0 0 0
43370- 0 0 0 0 0 0 0 0 0 0 0 0
43371- 0 0 0 0 0 0 0 0 0 0 0 0
43372- 0 0 0 0 0 0 0 0 0 0 0 0
43373- 0 0 0 0 0 0 0 0 0 0 0 0
43374- 0 0 0 0 0 0 0 0 0 0 0 0
43375- 0 0 0 0 0 0 0 0 0 0 0 0
43376- 0 0 0 0 0 0 0 0 0 0 0 0
43377- 0 0 0 0 0 0 0 0 0 0 0 0
43378- 0 0 0 0 0 0 0 0 0 10 10 10
43379- 30 30 30 78 78 78 50 50 50 104 69 6
43380-192 133 9 216 158 10 236 178 12 236 186 11
43381-232 195 16 241 208 19 244 214 54 245 215 43
43382-246 215 20 246 215 20 241 208 19 198 155 10
43383-200 144 11 216 158 10 156 118 10 2 2 6
43384- 2 2 6 2 2 6 2 2 6 2 2 6
43385- 6 6 6 90 90 90 54 54 54 18 18 18
43386- 6 6 6 0 0 0 0 0 0 0 0 0
43387- 0 0 0 0 0 0 0 0 0 0 0 0
43388- 0 0 0 0 0 0 0 0 0 0 0 0
43389- 0 0 0 0 0 0 0 0 0 0 0 0
43390- 0 0 0 0 0 0 0 0 0 0 0 0
43391- 0 0 0 0 0 0 0 0 0 0 0 0
43392- 0 0 0 0 0 0 0 0 0 0 0 0
43393- 0 0 0 0 0 0 0 0 0 0 0 0
43394- 0 0 0 0 0 0 0 0 0 0 0 0
43395- 0 0 0 0 0 0 0 0 0 0 0 0
43396- 0 0 0 0 0 0 0 0 0 0 0 0
43397- 0 0 0 0 0 0 0 0 0 0 0 0
43398- 0 0 0 0 0 0 0 0 0 10 10 10
43399- 30 30 30 78 78 78 46 46 46 22 22 22
43400-137 92 6 210 162 10 239 182 13 238 190 10
43401-238 202 15 241 208 19 246 215 20 246 215 20
43402-241 208 19 203 166 17 185 133 11 210 150 10
43403-216 158 10 210 150 10 102 78 10 2 2 6
43404- 6 6 6 54 54 54 14 14 14 2 2 6
43405- 2 2 6 62 62 62 74 74 74 30 30 30
43406- 10 10 10 0 0 0 0 0 0 0 0 0
43407- 0 0 0 0 0 0 0 0 0 0 0 0
43408- 0 0 0 0 0 0 0 0 0 0 0 0
43409- 0 0 0 0 0 0 0 0 0 0 0 0
43410- 0 0 0 0 0 0 0 0 0 0 0 0
43411- 0 0 0 0 0 0 0 0 0 0 0 0
43412- 0 0 0 0 0 0 0 0 0 0 0 0
43413- 0 0 0 0 0 0 0 0 0 0 0 0
43414- 0 0 0 0 0 0 0 0 0 0 0 0
43415- 0 0 0 0 0 0 0 0 0 0 0 0
43416- 0 0 0 0 0 0 0 0 0 0 0 0
43417- 0 0 0 0 0 0 0 0 0 0 0 0
43418- 0 0 0 0 0 0 0 0 0 10 10 10
43419- 34 34 34 78 78 78 50 50 50 6 6 6
43420- 94 70 30 139 102 15 190 146 13 226 184 13
43421-232 200 30 232 195 16 215 174 15 190 146 13
43422-168 122 10 192 133 9 210 150 10 213 154 11
43423-202 150 34 182 157 106 101 98 89 2 2 6
43424- 2 2 6 78 78 78 116 116 116 58 58 58
43425- 2 2 6 22 22 22 90 90 90 46 46 46
43426- 18 18 18 6 6 6 0 0 0 0 0 0
43427- 0 0 0 0 0 0 0 0 0 0 0 0
43428- 0 0 0 0 0 0 0 0 0 0 0 0
43429- 0 0 0 0 0 0 0 0 0 0 0 0
43430- 0 0 0 0 0 0 0 0 0 0 0 0
43431- 0 0 0 0 0 0 0 0 0 0 0 0
43432- 0 0 0 0 0 0 0 0 0 0 0 0
43433- 0 0 0 0 0 0 0 0 0 0 0 0
43434- 0 0 0 0 0 0 0 0 0 0 0 0
43435- 0 0 0 0 0 0 0 0 0 0 0 0
43436- 0 0 0 0 0 0 0 0 0 0 0 0
43437- 0 0 0 0 0 0 0 0 0 0 0 0
43438- 0 0 0 0 0 0 0 0 0 10 10 10
43439- 38 38 38 86 86 86 50 50 50 6 6 6
43440-128 128 128 174 154 114 156 107 11 168 122 10
43441-198 155 10 184 144 12 197 138 11 200 144 11
43442-206 145 10 206 145 10 197 138 11 188 164 115
43443-195 195 195 198 198 198 174 174 174 14 14 14
43444- 2 2 6 22 22 22 116 116 116 116 116 116
43445- 22 22 22 2 2 6 74 74 74 70 70 70
43446- 30 30 30 10 10 10 0 0 0 0 0 0
43447- 0 0 0 0 0 0 0 0 0 0 0 0
43448- 0 0 0 0 0 0 0 0 0 0 0 0
43449- 0 0 0 0 0 0 0 0 0 0 0 0
43450- 0 0 0 0 0 0 0 0 0 0 0 0
43451- 0 0 0 0 0 0 0 0 0 0 0 0
43452- 0 0 0 0 0 0 0 0 0 0 0 0
43453- 0 0 0 0 0 0 0 0 0 0 0 0
43454- 0 0 0 0 0 0 0 0 0 0 0 0
43455- 0 0 0 0 0 0 0 0 0 0 0 0
43456- 0 0 0 0 0 0 0 0 0 0 0 0
43457- 0 0 0 0 0 0 0 0 0 0 0 0
43458- 0 0 0 0 0 0 6 6 6 18 18 18
43459- 50 50 50 101 101 101 26 26 26 10 10 10
43460-138 138 138 190 190 190 174 154 114 156 107 11
43461-197 138 11 200 144 11 197 138 11 192 133 9
43462-180 123 7 190 142 34 190 178 144 187 187 187
43463-202 202 202 221 221 221 214 214 214 66 66 66
43464- 2 2 6 2 2 6 50 50 50 62 62 62
43465- 6 6 6 2 2 6 10 10 10 90 90 90
43466- 50 50 50 18 18 18 6 6 6 0 0 0
43467- 0 0 0 0 0 0 0 0 0 0 0 0
43468- 0 0 0 0 0 0 0 0 0 0 0 0
43469- 0 0 0 0 0 0 0 0 0 0 0 0
43470- 0 0 0 0 0 0 0 0 0 0 0 0
43471- 0 0 0 0 0 0 0 0 0 0 0 0
43472- 0 0 0 0 0 0 0 0 0 0 0 0
43473- 0 0 0 0 0 0 0 0 0 0 0 0
43474- 0 0 0 0 0 0 0 0 0 0 0 0
43475- 0 0 0 0 0 0 0 0 0 0 0 0
43476- 0 0 0 0 0 0 0 0 0 0 0 0
43477- 0 0 0 0 0 0 0 0 0 0 0 0
43478- 0 0 0 0 0 0 10 10 10 34 34 34
43479- 74 74 74 74 74 74 2 2 6 6 6 6
43480-144 144 144 198 198 198 190 190 190 178 166 146
43481-154 121 60 156 107 11 156 107 11 168 124 44
43482-174 154 114 187 187 187 190 190 190 210 210 210
43483-246 246 246 253 253 253 253 253 253 182 182 182
43484- 6 6 6 2 2 6 2 2 6 2 2 6
43485- 2 2 6 2 2 6 2 2 6 62 62 62
43486- 74 74 74 34 34 34 14 14 14 0 0 0
43487- 0 0 0 0 0 0 0 0 0 0 0 0
43488- 0 0 0 0 0 0 0 0 0 0 0 0
43489- 0 0 0 0 0 0 0 0 0 0 0 0
43490- 0 0 0 0 0 0 0 0 0 0 0 0
43491- 0 0 0 0 0 0 0 0 0 0 0 0
43492- 0 0 0 0 0 0 0 0 0 0 0 0
43493- 0 0 0 0 0 0 0 0 0 0 0 0
43494- 0 0 0 0 0 0 0 0 0 0 0 0
43495- 0 0 0 0 0 0 0 0 0 0 0 0
43496- 0 0 0 0 0 0 0 0 0 0 0 0
43497- 0 0 0 0 0 0 0 0 0 0 0 0
43498- 0 0 0 10 10 10 22 22 22 54 54 54
43499- 94 94 94 18 18 18 2 2 6 46 46 46
43500-234 234 234 221 221 221 190 190 190 190 190 190
43501-190 190 190 187 187 187 187 187 187 190 190 190
43502-190 190 190 195 195 195 214 214 214 242 242 242
43503-253 253 253 253 253 253 253 253 253 253 253 253
43504- 82 82 82 2 2 6 2 2 6 2 2 6
43505- 2 2 6 2 2 6 2 2 6 14 14 14
43506- 86 86 86 54 54 54 22 22 22 6 6 6
43507- 0 0 0 0 0 0 0 0 0 0 0 0
43508- 0 0 0 0 0 0 0 0 0 0 0 0
43509- 0 0 0 0 0 0 0 0 0 0 0 0
43510- 0 0 0 0 0 0 0 0 0 0 0 0
43511- 0 0 0 0 0 0 0 0 0 0 0 0
43512- 0 0 0 0 0 0 0 0 0 0 0 0
43513- 0 0 0 0 0 0 0 0 0 0 0 0
43514- 0 0 0 0 0 0 0 0 0 0 0 0
43515- 0 0 0 0 0 0 0 0 0 0 0 0
43516- 0 0 0 0 0 0 0 0 0 0 0 0
43517- 0 0 0 0 0 0 0 0 0 0 0 0
43518- 6 6 6 18 18 18 46 46 46 90 90 90
43519- 46 46 46 18 18 18 6 6 6 182 182 182
43520-253 253 253 246 246 246 206 206 206 190 190 190
43521-190 190 190 190 190 190 190 190 190 190 190 190
43522-206 206 206 231 231 231 250 250 250 253 253 253
43523-253 253 253 253 253 253 253 253 253 253 253 253
43524-202 202 202 14 14 14 2 2 6 2 2 6
43525- 2 2 6 2 2 6 2 2 6 2 2 6
43526- 42 42 42 86 86 86 42 42 42 18 18 18
43527- 6 6 6 0 0 0 0 0 0 0 0 0
43528- 0 0 0 0 0 0 0 0 0 0 0 0
43529- 0 0 0 0 0 0 0 0 0 0 0 0
43530- 0 0 0 0 0 0 0 0 0 0 0 0
43531- 0 0 0 0 0 0 0 0 0 0 0 0
43532- 0 0 0 0 0 0 0 0 0 0 0 0
43533- 0 0 0 0 0 0 0 0 0 0 0 0
43534- 0 0 0 0 0 0 0 0 0 0 0 0
43535- 0 0 0 0 0 0 0 0 0 0 0 0
43536- 0 0 0 0 0 0 0 0 0 0 0 0
43537- 0 0 0 0 0 0 0 0 0 6 6 6
43538- 14 14 14 38 38 38 74 74 74 66 66 66
43539- 2 2 6 6 6 6 90 90 90 250 250 250
43540-253 253 253 253 253 253 238 238 238 198 198 198
43541-190 190 190 190 190 190 195 195 195 221 221 221
43542-246 246 246 253 253 253 253 253 253 253 253 253
43543-253 253 253 253 253 253 253 253 253 253 253 253
43544-253 253 253 82 82 82 2 2 6 2 2 6
43545- 2 2 6 2 2 6 2 2 6 2 2 6
43546- 2 2 6 78 78 78 70 70 70 34 34 34
43547- 14 14 14 6 6 6 0 0 0 0 0 0
43548- 0 0 0 0 0 0 0 0 0 0 0 0
43549- 0 0 0 0 0 0 0 0 0 0 0 0
43550- 0 0 0 0 0 0 0 0 0 0 0 0
43551- 0 0 0 0 0 0 0 0 0 0 0 0
43552- 0 0 0 0 0 0 0 0 0 0 0 0
43553- 0 0 0 0 0 0 0 0 0 0 0 0
43554- 0 0 0 0 0 0 0 0 0 0 0 0
43555- 0 0 0 0 0 0 0 0 0 0 0 0
43556- 0 0 0 0 0 0 0 0 0 0 0 0
43557- 0 0 0 0 0 0 0 0 0 14 14 14
43558- 34 34 34 66 66 66 78 78 78 6 6 6
43559- 2 2 6 18 18 18 218 218 218 253 253 253
43560-253 253 253 253 253 253 253 253 253 246 246 246
43561-226 226 226 231 231 231 246 246 246 253 253 253
43562-253 253 253 253 253 253 253 253 253 253 253 253
43563-253 253 253 253 253 253 253 253 253 253 253 253
43564-253 253 253 178 178 178 2 2 6 2 2 6
43565- 2 2 6 2 2 6 2 2 6 2 2 6
43566- 2 2 6 18 18 18 90 90 90 62 62 62
43567- 30 30 30 10 10 10 0 0 0 0 0 0
43568- 0 0 0 0 0 0 0 0 0 0 0 0
43569- 0 0 0 0 0 0 0 0 0 0 0 0
43570- 0 0 0 0 0 0 0 0 0 0 0 0
43571- 0 0 0 0 0 0 0 0 0 0 0 0
43572- 0 0 0 0 0 0 0 0 0 0 0 0
43573- 0 0 0 0 0 0 0 0 0 0 0 0
43574- 0 0 0 0 0 0 0 0 0 0 0 0
43575- 0 0 0 0 0 0 0 0 0 0 0 0
43576- 0 0 0 0 0 0 0 0 0 0 0 0
43577- 0 0 0 0 0 0 10 10 10 26 26 26
43578- 58 58 58 90 90 90 18 18 18 2 2 6
43579- 2 2 6 110 110 110 253 253 253 253 253 253
43580-253 253 253 253 253 253 253 253 253 253 253 253
43581-250 250 250 253 253 253 253 253 253 253 253 253
43582-253 253 253 253 253 253 253 253 253 253 253 253
43583-253 253 253 253 253 253 253 253 253 253 253 253
43584-253 253 253 231 231 231 18 18 18 2 2 6
43585- 2 2 6 2 2 6 2 2 6 2 2 6
43586- 2 2 6 2 2 6 18 18 18 94 94 94
43587- 54 54 54 26 26 26 10 10 10 0 0 0
43588- 0 0 0 0 0 0 0 0 0 0 0 0
43589- 0 0 0 0 0 0 0 0 0 0 0 0
43590- 0 0 0 0 0 0 0 0 0 0 0 0
43591- 0 0 0 0 0 0 0 0 0 0 0 0
43592- 0 0 0 0 0 0 0 0 0 0 0 0
43593- 0 0 0 0 0 0 0 0 0 0 0 0
43594- 0 0 0 0 0 0 0 0 0 0 0 0
43595- 0 0 0 0 0 0 0 0 0 0 0 0
43596- 0 0 0 0 0 0 0 0 0 0 0 0
43597- 0 0 0 6 6 6 22 22 22 50 50 50
43598- 90 90 90 26 26 26 2 2 6 2 2 6
43599- 14 14 14 195 195 195 250 250 250 253 253 253
43600-253 253 253 253 253 253 253 253 253 253 253 253
43601-253 253 253 253 253 253 253 253 253 253 253 253
43602-253 253 253 253 253 253 253 253 253 253 253 253
43603-253 253 253 253 253 253 253 253 253 253 253 253
43604-250 250 250 242 242 242 54 54 54 2 2 6
43605- 2 2 6 2 2 6 2 2 6 2 2 6
43606- 2 2 6 2 2 6 2 2 6 38 38 38
43607- 86 86 86 50 50 50 22 22 22 6 6 6
43608- 0 0 0 0 0 0 0 0 0 0 0 0
43609- 0 0 0 0 0 0 0 0 0 0 0 0
43610- 0 0 0 0 0 0 0 0 0 0 0 0
43611- 0 0 0 0 0 0 0 0 0 0 0 0
43612- 0 0 0 0 0 0 0 0 0 0 0 0
43613- 0 0 0 0 0 0 0 0 0 0 0 0
43614- 0 0 0 0 0 0 0 0 0 0 0 0
43615- 0 0 0 0 0 0 0 0 0 0 0 0
43616- 0 0 0 0 0 0 0 0 0 0 0 0
43617- 6 6 6 14 14 14 38 38 38 82 82 82
43618- 34 34 34 2 2 6 2 2 6 2 2 6
43619- 42 42 42 195 195 195 246 246 246 253 253 253
43620-253 253 253 253 253 253 253 253 253 250 250 250
43621-242 242 242 242 242 242 250 250 250 253 253 253
43622-253 253 253 253 253 253 253 253 253 253 253 253
43623-253 253 253 250 250 250 246 246 246 238 238 238
43624-226 226 226 231 231 231 101 101 101 6 6 6
43625- 2 2 6 2 2 6 2 2 6 2 2 6
43626- 2 2 6 2 2 6 2 2 6 2 2 6
43627- 38 38 38 82 82 82 42 42 42 14 14 14
43628- 6 6 6 0 0 0 0 0 0 0 0 0
43629- 0 0 0 0 0 0 0 0 0 0 0 0
43630- 0 0 0 0 0 0 0 0 0 0 0 0
43631- 0 0 0 0 0 0 0 0 0 0 0 0
43632- 0 0 0 0 0 0 0 0 0 0 0 0
43633- 0 0 0 0 0 0 0 0 0 0 0 0
43634- 0 0 0 0 0 0 0 0 0 0 0 0
43635- 0 0 0 0 0 0 0 0 0 0 0 0
43636- 0 0 0 0 0 0 0 0 0 0 0 0
43637- 10 10 10 26 26 26 62 62 62 66 66 66
43638- 2 2 6 2 2 6 2 2 6 6 6 6
43639- 70 70 70 170 170 170 206 206 206 234 234 234
43640-246 246 246 250 250 250 250 250 250 238 238 238
43641-226 226 226 231 231 231 238 238 238 250 250 250
43642-250 250 250 250 250 250 246 246 246 231 231 231
43643-214 214 214 206 206 206 202 202 202 202 202 202
43644-198 198 198 202 202 202 182 182 182 18 18 18
43645- 2 2 6 2 2 6 2 2 6 2 2 6
43646- 2 2 6 2 2 6 2 2 6 2 2 6
43647- 2 2 6 62 62 62 66 66 66 30 30 30
43648- 10 10 10 0 0 0 0 0 0 0 0 0
43649- 0 0 0 0 0 0 0 0 0 0 0 0
43650- 0 0 0 0 0 0 0 0 0 0 0 0
43651- 0 0 0 0 0 0 0 0 0 0 0 0
43652- 0 0 0 0 0 0 0 0 0 0 0 0
43653- 0 0 0 0 0 0 0 0 0 0 0 0
43654- 0 0 0 0 0 0 0 0 0 0 0 0
43655- 0 0 0 0 0 0 0 0 0 0 0 0
43656- 0 0 0 0 0 0 0 0 0 0 0 0
43657- 14 14 14 42 42 42 82 82 82 18 18 18
43658- 2 2 6 2 2 6 2 2 6 10 10 10
43659- 94 94 94 182 182 182 218 218 218 242 242 242
43660-250 250 250 253 253 253 253 253 253 250 250 250
43661-234 234 234 253 253 253 253 253 253 253 253 253
43662-253 253 253 253 253 253 253 253 253 246 246 246
43663-238 238 238 226 226 226 210 210 210 202 202 202
43664-195 195 195 195 195 195 210 210 210 158 158 158
43665- 6 6 6 14 14 14 50 50 50 14 14 14
43666- 2 2 6 2 2 6 2 2 6 2 2 6
43667- 2 2 6 6 6 6 86 86 86 46 46 46
43668- 18 18 18 6 6 6 0 0 0 0 0 0
43669- 0 0 0 0 0 0 0 0 0 0 0 0
43670- 0 0 0 0 0 0 0 0 0 0 0 0
43671- 0 0 0 0 0 0 0 0 0 0 0 0
43672- 0 0 0 0 0 0 0 0 0 0 0 0
43673- 0 0 0 0 0 0 0 0 0 0 0 0
43674- 0 0 0 0 0 0 0 0 0 0 0 0
43675- 0 0 0 0 0 0 0 0 0 0 0 0
43676- 0 0 0 0 0 0 0 0 0 6 6 6
43677- 22 22 22 54 54 54 70 70 70 2 2 6
43678- 2 2 6 10 10 10 2 2 6 22 22 22
43679-166 166 166 231 231 231 250 250 250 253 253 253
43680-253 253 253 253 253 253 253 253 253 250 250 250
43681-242 242 242 253 253 253 253 253 253 253 253 253
43682-253 253 253 253 253 253 253 253 253 253 253 253
43683-253 253 253 253 253 253 253 253 253 246 246 246
43684-231 231 231 206 206 206 198 198 198 226 226 226
43685- 94 94 94 2 2 6 6 6 6 38 38 38
43686- 30 30 30 2 2 6 2 2 6 2 2 6
43687- 2 2 6 2 2 6 62 62 62 66 66 66
43688- 26 26 26 10 10 10 0 0 0 0 0 0
43689- 0 0 0 0 0 0 0 0 0 0 0 0
43690- 0 0 0 0 0 0 0 0 0 0 0 0
43691- 0 0 0 0 0 0 0 0 0 0 0 0
43692- 0 0 0 0 0 0 0 0 0 0 0 0
43693- 0 0 0 0 0 0 0 0 0 0 0 0
43694- 0 0 0 0 0 0 0 0 0 0 0 0
43695- 0 0 0 0 0 0 0 0 0 0 0 0
43696- 0 0 0 0 0 0 0 0 0 10 10 10
43697- 30 30 30 74 74 74 50 50 50 2 2 6
43698- 26 26 26 26 26 26 2 2 6 106 106 106
43699-238 238 238 253 253 253 253 253 253 253 253 253
43700-253 253 253 253 253 253 253 253 253 253 253 253
43701-253 253 253 253 253 253 253 253 253 253 253 253
43702-253 253 253 253 253 253 253 253 253 253 253 253
43703-253 253 253 253 253 253 253 253 253 253 253 253
43704-253 253 253 246 246 246 218 218 218 202 202 202
43705-210 210 210 14 14 14 2 2 6 2 2 6
43706- 30 30 30 22 22 22 2 2 6 2 2 6
43707- 2 2 6 2 2 6 18 18 18 86 86 86
43708- 42 42 42 14 14 14 0 0 0 0 0 0
43709- 0 0 0 0 0 0 0 0 0 0 0 0
43710- 0 0 0 0 0 0 0 0 0 0 0 0
43711- 0 0 0 0 0 0 0 0 0 0 0 0
43712- 0 0 0 0 0 0 0 0 0 0 0 0
43713- 0 0 0 0 0 0 0 0 0 0 0 0
43714- 0 0 0 0 0 0 0 0 0 0 0 0
43715- 0 0 0 0 0 0 0 0 0 0 0 0
43716- 0 0 0 0 0 0 0 0 0 14 14 14
43717- 42 42 42 90 90 90 22 22 22 2 2 6
43718- 42 42 42 2 2 6 18 18 18 218 218 218
43719-253 253 253 253 253 253 253 253 253 253 253 253
43720-253 253 253 253 253 253 253 253 253 253 253 253
43721-253 253 253 253 253 253 253 253 253 253 253 253
43722-253 253 253 253 253 253 253 253 253 253 253 253
43723-253 253 253 253 253 253 253 253 253 253 253 253
43724-253 253 253 253 253 253 250 250 250 221 221 221
43725-218 218 218 101 101 101 2 2 6 14 14 14
43726- 18 18 18 38 38 38 10 10 10 2 2 6
43727- 2 2 6 2 2 6 2 2 6 78 78 78
43728- 58 58 58 22 22 22 6 6 6 0 0 0
43729- 0 0 0 0 0 0 0 0 0 0 0 0
43730- 0 0 0 0 0 0 0 0 0 0 0 0
43731- 0 0 0 0 0 0 0 0 0 0 0 0
43732- 0 0 0 0 0 0 0 0 0 0 0 0
43733- 0 0 0 0 0 0 0 0 0 0 0 0
43734- 0 0 0 0 0 0 0 0 0 0 0 0
43735- 0 0 0 0 0 0 0 0 0 0 0 0
43736- 0 0 0 0 0 0 6 6 6 18 18 18
43737- 54 54 54 82 82 82 2 2 6 26 26 26
43738- 22 22 22 2 2 6 123 123 123 253 253 253
43739-253 253 253 253 253 253 253 253 253 253 253 253
43740-253 253 253 253 253 253 253 253 253 253 253 253
43741-253 253 253 253 253 253 253 253 253 253 253 253
43742-253 253 253 253 253 253 253 253 253 253 253 253
43743-253 253 253 253 253 253 253 253 253 253 253 253
43744-253 253 253 253 253 253 253 253 253 250 250 250
43745-238 238 238 198 198 198 6 6 6 38 38 38
43746- 58 58 58 26 26 26 38 38 38 2 2 6
43747- 2 2 6 2 2 6 2 2 6 46 46 46
43748- 78 78 78 30 30 30 10 10 10 0 0 0
43749- 0 0 0 0 0 0 0 0 0 0 0 0
43750- 0 0 0 0 0 0 0 0 0 0 0 0
43751- 0 0 0 0 0 0 0 0 0 0 0 0
43752- 0 0 0 0 0 0 0 0 0 0 0 0
43753- 0 0 0 0 0 0 0 0 0 0 0 0
43754- 0 0 0 0 0 0 0 0 0 0 0 0
43755- 0 0 0 0 0 0 0 0 0 0 0 0
43756- 0 0 0 0 0 0 10 10 10 30 30 30
43757- 74 74 74 58 58 58 2 2 6 42 42 42
43758- 2 2 6 22 22 22 231 231 231 253 253 253
43759-253 253 253 253 253 253 253 253 253 253 253 253
43760-253 253 253 253 253 253 253 253 253 250 250 250
43761-253 253 253 253 253 253 253 253 253 253 253 253
43762-253 253 253 253 253 253 253 253 253 253 253 253
43763-253 253 253 253 253 253 253 253 253 253 253 253
43764-253 253 253 253 253 253 253 253 253 253 253 253
43765-253 253 253 246 246 246 46 46 46 38 38 38
43766- 42 42 42 14 14 14 38 38 38 14 14 14
43767- 2 2 6 2 2 6 2 2 6 6 6 6
43768- 86 86 86 46 46 46 14 14 14 0 0 0
43769- 0 0 0 0 0 0 0 0 0 0 0 0
43770- 0 0 0 0 0 0 0 0 0 0 0 0
43771- 0 0 0 0 0 0 0 0 0 0 0 0
43772- 0 0 0 0 0 0 0 0 0 0 0 0
43773- 0 0 0 0 0 0 0 0 0 0 0 0
43774- 0 0 0 0 0 0 0 0 0 0 0 0
43775- 0 0 0 0 0 0 0 0 0 0 0 0
43776- 0 0 0 6 6 6 14 14 14 42 42 42
43777- 90 90 90 18 18 18 18 18 18 26 26 26
43778- 2 2 6 116 116 116 253 253 253 253 253 253
43779-253 253 253 253 253 253 253 253 253 253 253 253
43780-253 253 253 253 253 253 250 250 250 238 238 238
43781-253 253 253 253 253 253 253 253 253 253 253 253
43782-253 253 253 253 253 253 253 253 253 253 253 253
43783-253 253 253 253 253 253 253 253 253 253 253 253
43784-253 253 253 253 253 253 253 253 253 253 253 253
43785-253 253 253 253 253 253 94 94 94 6 6 6
43786- 2 2 6 2 2 6 10 10 10 34 34 34
43787- 2 2 6 2 2 6 2 2 6 2 2 6
43788- 74 74 74 58 58 58 22 22 22 6 6 6
43789- 0 0 0 0 0 0 0 0 0 0 0 0
43790- 0 0 0 0 0 0 0 0 0 0 0 0
43791- 0 0 0 0 0 0 0 0 0 0 0 0
43792- 0 0 0 0 0 0 0 0 0 0 0 0
43793- 0 0 0 0 0 0 0 0 0 0 0 0
43794- 0 0 0 0 0 0 0 0 0 0 0 0
43795- 0 0 0 0 0 0 0 0 0 0 0 0
43796- 0 0 0 10 10 10 26 26 26 66 66 66
43797- 82 82 82 2 2 6 38 38 38 6 6 6
43798- 14 14 14 210 210 210 253 253 253 253 253 253
43799-253 253 253 253 253 253 253 253 253 253 253 253
43800-253 253 253 253 253 253 246 246 246 242 242 242
43801-253 253 253 253 253 253 253 253 253 253 253 253
43802-253 253 253 253 253 253 253 253 253 253 253 253
43803-253 253 253 253 253 253 253 253 253 253 253 253
43804-253 253 253 253 253 253 253 253 253 253 253 253
43805-253 253 253 253 253 253 144 144 144 2 2 6
43806- 2 2 6 2 2 6 2 2 6 46 46 46
43807- 2 2 6 2 2 6 2 2 6 2 2 6
43808- 42 42 42 74 74 74 30 30 30 10 10 10
43809- 0 0 0 0 0 0 0 0 0 0 0 0
43810- 0 0 0 0 0 0 0 0 0 0 0 0
43811- 0 0 0 0 0 0 0 0 0 0 0 0
43812- 0 0 0 0 0 0 0 0 0 0 0 0
43813- 0 0 0 0 0 0 0 0 0 0 0 0
43814- 0 0 0 0 0 0 0 0 0 0 0 0
43815- 0 0 0 0 0 0 0 0 0 0 0 0
43816- 6 6 6 14 14 14 42 42 42 90 90 90
43817- 26 26 26 6 6 6 42 42 42 2 2 6
43818- 74 74 74 250 250 250 253 253 253 253 253 253
43819-253 253 253 253 253 253 253 253 253 253 253 253
43820-253 253 253 253 253 253 242 242 242 242 242 242
43821-253 253 253 253 253 253 253 253 253 253 253 253
43822-253 253 253 253 253 253 253 253 253 253 253 253
43823-253 253 253 253 253 253 253 253 253 253 253 253
43824-253 253 253 253 253 253 253 253 253 253 253 253
43825-253 253 253 253 253 253 182 182 182 2 2 6
43826- 2 2 6 2 2 6 2 2 6 46 46 46
43827- 2 2 6 2 2 6 2 2 6 2 2 6
43828- 10 10 10 86 86 86 38 38 38 10 10 10
43829- 0 0 0 0 0 0 0 0 0 0 0 0
43830- 0 0 0 0 0 0 0 0 0 0 0 0
43831- 0 0 0 0 0 0 0 0 0 0 0 0
43832- 0 0 0 0 0 0 0 0 0 0 0 0
43833- 0 0 0 0 0 0 0 0 0 0 0 0
43834- 0 0 0 0 0 0 0 0 0 0 0 0
43835- 0 0 0 0 0 0 0 0 0 0 0 0
43836- 10 10 10 26 26 26 66 66 66 82 82 82
43837- 2 2 6 22 22 22 18 18 18 2 2 6
43838-149 149 149 253 253 253 253 253 253 253 253 253
43839-253 253 253 253 253 253 253 253 253 253 253 253
43840-253 253 253 253 253 253 234 234 234 242 242 242
43841-253 253 253 253 253 253 253 253 253 253 253 253
43842-253 253 253 253 253 253 253 253 253 253 253 253
43843-253 253 253 253 253 253 253 253 253 253 253 253
43844-253 253 253 253 253 253 253 253 253 253 253 253
43845-253 253 253 253 253 253 206 206 206 2 2 6
43846- 2 2 6 2 2 6 2 2 6 38 38 38
43847- 2 2 6 2 2 6 2 2 6 2 2 6
43848- 6 6 6 86 86 86 46 46 46 14 14 14
43849- 0 0 0 0 0 0 0 0 0 0 0 0
43850- 0 0 0 0 0 0 0 0 0 0 0 0
43851- 0 0 0 0 0 0 0 0 0 0 0 0
43852- 0 0 0 0 0 0 0 0 0 0 0 0
43853- 0 0 0 0 0 0 0 0 0 0 0 0
43854- 0 0 0 0 0 0 0 0 0 0 0 0
43855- 0 0 0 0 0 0 0 0 0 6 6 6
43856- 18 18 18 46 46 46 86 86 86 18 18 18
43857- 2 2 6 34 34 34 10 10 10 6 6 6
43858-210 210 210 253 253 253 253 253 253 253 253 253
43859-253 253 253 253 253 253 253 253 253 253 253 253
43860-253 253 253 253 253 253 234 234 234 242 242 242
43861-253 253 253 253 253 253 253 253 253 253 253 253
43862-253 253 253 253 253 253 253 253 253 253 253 253
43863-253 253 253 253 253 253 253 253 253 253 253 253
43864-253 253 253 253 253 253 253 253 253 253 253 253
43865-253 253 253 253 253 253 221 221 221 6 6 6
43866- 2 2 6 2 2 6 6 6 6 30 30 30
43867- 2 2 6 2 2 6 2 2 6 2 2 6
43868- 2 2 6 82 82 82 54 54 54 18 18 18
43869- 6 6 6 0 0 0 0 0 0 0 0 0
43870- 0 0 0 0 0 0 0 0 0 0 0 0
43871- 0 0 0 0 0 0 0 0 0 0 0 0
43872- 0 0 0 0 0 0 0 0 0 0 0 0
43873- 0 0 0 0 0 0 0 0 0 0 0 0
43874- 0 0 0 0 0 0 0 0 0 0 0 0
43875- 0 0 0 0 0 0 0 0 0 10 10 10
43876- 26 26 26 66 66 66 62 62 62 2 2 6
43877- 2 2 6 38 38 38 10 10 10 26 26 26
43878-238 238 238 253 253 253 253 253 253 253 253 253
43879-253 253 253 253 253 253 253 253 253 253 253 253
43880-253 253 253 253 253 253 231 231 231 238 238 238
43881-253 253 253 253 253 253 253 253 253 253 253 253
43882-253 253 253 253 253 253 253 253 253 253 253 253
43883-253 253 253 253 253 253 253 253 253 253 253 253
43884-253 253 253 253 253 253 253 253 253 253 253 253
43885-253 253 253 253 253 253 231 231 231 6 6 6
43886- 2 2 6 2 2 6 10 10 10 30 30 30
43887- 2 2 6 2 2 6 2 2 6 2 2 6
43888- 2 2 6 66 66 66 58 58 58 22 22 22
43889- 6 6 6 0 0 0 0 0 0 0 0 0
43890- 0 0 0 0 0 0 0 0 0 0 0 0
43891- 0 0 0 0 0 0 0 0 0 0 0 0
43892- 0 0 0 0 0 0 0 0 0 0 0 0
43893- 0 0 0 0 0 0 0 0 0 0 0 0
43894- 0 0 0 0 0 0 0 0 0 0 0 0
43895- 0 0 0 0 0 0 0 0 0 10 10 10
43896- 38 38 38 78 78 78 6 6 6 2 2 6
43897- 2 2 6 46 46 46 14 14 14 42 42 42
43898-246 246 246 253 253 253 253 253 253 253 253 253
43899-253 253 253 253 253 253 253 253 253 253 253 253
43900-253 253 253 253 253 253 231 231 231 242 242 242
43901-253 253 253 253 253 253 253 253 253 253 253 253
43902-253 253 253 253 253 253 253 253 253 253 253 253
43903-253 253 253 253 253 253 253 253 253 253 253 253
43904-253 253 253 253 253 253 253 253 253 253 253 253
43905-253 253 253 253 253 253 234 234 234 10 10 10
43906- 2 2 6 2 2 6 22 22 22 14 14 14
43907- 2 2 6 2 2 6 2 2 6 2 2 6
43908- 2 2 6 66 66 66 62 62 62 22 22 22
43909- 6 6 6 0 0 0 0 0 0 0 0 0
43910- 0 0 0 0 0 0 0 0 0 0 0 0
43911- 0 0 0 0 0 0 0 0 0 0 0 0
43912- 0 0 0 0 0 0 0 0 0 0 0 0
43913- 0 0 0 0 0 0 0 0 0 0 0 0
43914- 0 0 0 0 0 0 0 0 0 0 0 0
43915- 0 0 0 0 0 0 6 6 6 18 18 18
43916- 50 50 50 74 74 74 2 2 6 2 2 6
43917- 14 14 14 70 70 70 34 34 34 62 62 62
43918-250 250 250 253 253 253 253 253 253 253 253 253
43919-253 253 253 253 253 253 253 253 253 253 253 253
43920-253 253 253 253 253 253 231 231 231 246 246 246
43921-253 253 253 253 253 253 253 253 253 253 253 253
43922-253 253 253 253 253 253 253 253 253 253 253 253
43923-253 253 253 253 253 253 253 253 253 253 253 253
43924-253 253 253 253 253 253 253 253 253 253 253 253
43925-253 253 253 253 253 253 234 234 234 14 14 14
43926- 2 2 6 2 2 6 30 30 30 2 2 6
43927- 2 2 6 2 2 6 2 2 6 2 2 6
43928- 2 2 6 66 66 66 62 62 62 22 22 22
43929- 6 6 6 0 0 0 0 0 0 0 0 0
43930- 0 0 0 0 0 0 0 0 0 0 0 0
43931- 0 0 0 0 0 0 0 0 0 0 0 0
43932- 0 0 0 0 0 0 0 0 0 0 0 0
43933- 0 0 0 0 0 0 0 0 0 0 0 0
43934- 0 0 0 0 0 0 0 0 0 0 0 0
43935- 0 0 0 0 0 0 6 6 6 18 18 18
43936- 54 54 54 62 62 62 2 2 6 2 2 6
43937- 2 2 6 30 30 30 46 46 46 70 70 70
43938-250 250 250 253 253 253 253 253 253 253 253 253
43939-253 253 253 253 253 253 253 253 253 253 253 253
43940-253 253 253 253 253 253 231 231 231 246 246 246
43941-253 253 253 253 253 253 253 253 253 253 253 253
43942-253 253 253 253 253 253 253 253 253 253 253 253
43943-253 253 253 253 253 253 253 253 253 253 253 253
43944-253 253 253 253 253 253 253 253 253 253 253 253
43945-253 253 253 253 253 253 226 226 226 10 10 10
43946- 2 2 6 6 6 6 30 30 30 2 2 6
43947- 2 2 6 2 2 6 2 2 6 2 2 6
43948- 2 2 6 66 66 66 58 58 58 22 22 22
43949- 6 6 6 0 0 0 0 0 0 0 0 0
43950- 0 0 0 0 0 0 0 0 0 0 0 0
43951- 0 0 0 0 0 0 0 0 0 0 0 0
43952- 0 0 0 0 0 0 0 0 0 0 0 0
43953- 0 0 0 0 0 0 0 0 0 0 0 0
43954- 0 0 0 0 0 0 0 0 0 0 0 0
43955- 0 0 0 0 0 0 6 6 6 22 22 22
43956- 58 58 58 62 62 62 2 2 6 2 2 6
43957- 2 2 6 2 2 6 30 30 30 78 78 78
43958-250 250 250 253 253 253 253 253 253 253 253 253
43959-253 253 253 253 253 253 253 253 253 253 253 253
43960-253 253 253 253 253 253 231 231 231 246 246 246
43961-253 253 253 253 253 253 253 253 253 253 253 253
43962-253 253 253 253 253 253 253 253 253 253 253 253
43963-253 253 253 253 253 253 253 253 253 253 253 253
43964-253 253 253 253 253 253 253 253 253 253 253 253
43965-253 253 253 253 253 253 206 206 206 2 2 6
43966- 22 22 22 34 34 34 18 14 6 22 22 22
43967- 26 26 26 18 18 18 6 6 6 2 2 6
43968- 2 2 6 82 82 82 54 54 54 18 18 18
43969- 6 6 6 0 0 0 0 0 0 0 0 0
43970- 0 0 0 0 0 0 0 0 0 0 0 0
43971- 0 0 0 0 0 0 0 0 0 0 0 0
43972- 0 0 0 0 0 0 0 0 0 0 0 0
43973- 0 0 0 0 0 0 0 0 0 0 0 0
43974- 0 0 0 0 0 0 0 0 0 0 0 0
43975- 0 0 0 0 0 0 6 6 6 26 26 26
43976- 62 62 62 106 106 106 74 54 14 185 133 11
43977-210 162 10 121 92 8 6 6 6 62 62 62
43978-238 238 238 253 253 253 253 253 253 253 253 253
43979-253 253 253 253 253 253 253 253 253 253 253 253
43980-253 253 253 253 253 253 231 231 231 246 246 246
43981-253 253 253 253 253 253 253 253 253 253 253 253
43982-253 253 253 253 253 253 253 253 253 253 253 253
43983-253 253 253 253 253 253 253 253 253 253 253 253
43984-253 253 253 253 253 253 253 253 253 253 253 253
43985-253 253 253 253 253 253 158 158 158 18 18 18
43986- 14 14 14 2 2 6 2 2 6 2 2 6
43987- 6 6 6 18 18 18 66 66 66 38 38 38
43988- 6 6 6 94 94 94 50 50 50 18 18 18
43989- 6 6 6 0 0 0 0 0 0 0 0 0
43990- 0 0 0 0 0 0 0 0 0 0 0 0
43991- 0 0 0 0 0 0 0 0 0 0 0 0
43992- 0 0 0 0 0 0 0 0 0 0 0 0
43993- 0 0 0 0 0 0 0 0 0 0 0 0
43994- 0 0 0 0 0 0 0 0 0 6 6 6
43995- 10 10 10 10 10 10 18 18 18 38 38 38
43996- 78 78 78 142 134 106 216 158 10 242 186 14
43997-246 190 14 246 190 14 156 118 10 10 10 10
43998- 90 90 90 238 238 238 253 253 253 253 253 253
43999-253 253 253 253 253 253 253 253 253 253 253 253
44000-253 253 253 253 253 253 231 231 231 250 250 250
44001-253 253 253 253 253 253 253 253 253 253 253 253
44002-253 253 253 253 253 253 253 253 253 253 253 253
44003-253 253 253 253 253 253 253 253 253 253 253 253
44004-253 253 253 253 253 253 253 253 253 246 230 190
44005-238 204 91 238 204 91 181 142 44 37 26 9
44006- 2 2 6 2 2 6 2 2 6 2 2 6
44007- 2 2 6 2 2 6 38 38 38 46 46 46
44008- 26 26 26 106 106 106 54 54 54 18 18 18
44009- 6 6 6 0 0 0 0 0 0 0 0 0
44010- 0 0 0 0 0 0 0 0 0 0 0 0
44011- 0 0 0 0 0 0 0 0 0 0 0 0
44012- 0 0 0 0 0 0 0 0 0 0 0 0
44013- 0 0 0 0 0 0 0 0 0 0 0 0
44014- 0 0 0 6 6 6 14 14 14 22 22 22
44015- 30 30 30 38 38 38 50 50 50 70 70 70
44016-106 106 106 190 142 34 226 170 11 242 186 14
44017-246 190 14 246 190 14 246 190 14 154 114 10
44018- 6 6 6 74 74 74 226 226 226 253 253 253
44019-253 253 253 253 253 253 253 253 253 253 253 253
44020-253 253 253 253 253 253 231 231 231 250 250 250
44021-253 253 253 253 253 253 253 253 253 253 253 253
44022-253 253 253 253 253 253 253 253 253 253 253 253
44023-253 253 253 253 253 253 253 253 253 253 253 253
44024-253 253 253 253 253 253 253 253 253 228 184 62
44025-241 196 14 241 208 19 232 195 16 38 30 10
44026- 2 2 6 2 2 6 2 2 6 2 2 6
44027- 2 2 6 6 6 6 30 30 30 26 26 26
44028-203 166 17 154 142 90 66 66 66 26 26 26
44029- 6 6 6 0 0 0 0 0 0 0 0 0
44030- 0 0 0 0 0 0 0 0 0 0 0 0
44031- 0 0 0 0 0 0 0 0 0 0 0 0
44032- 0 0 0 0 0 0 0 0 0 0 0 0
44033- 0 0 0 0 0 0 0 0 0 0 0 0
44034- 6 6 6 18 18 18 38 38 38 58 58 58
44035- 78 78 78 86 86 86 101 101 101 123 123 123
44036-175 146 61 210 150 10 234 174 13 246 186 14
44037-246 190 14 246 190 14 246 190 14 238 190 10
44038-102 78 10 2 2 6 46 46 46 198 198 198
44039-253 253 253 253 253 253 253 253 253 253 253 253
44040-253 253 253 253 253 253 234 234 234 242 242 242
44041-253 253 253 253 253 253 253 253 253 253 253 253
44042-253 253 253 253 253 253 253 253 253 253 253 253
44043-253 253 253 253 253 253 253 253 253 253 253 253
44044-253 253 253 253 253 253 253 253 253 224 178 62
44045-242 186 14 241 196 14 210 166 10 22 18 6
44046- 2 2 6 2 2 6 2 2 6 2 2 6
44047- 2 2 6 2 2 6 6 6 6 121 92 8
44048-238 202 15 232 195 16 82 82 82 34 34 34
44049- 10 10 10 0 0 0 0 0 0 0 0 0
44050- 0 0 0 0 0 0 0 0 0 0 0 0
44051- 0 0 0 0 0 0 0 0 0 0 0 0
44052- 0 0 0 0 0 0 0 0 0 0 0 0
44053- 0 0 0 0 0 0 0 0 0 0 0 0
44054- 14 14 14 38 38 38 70 70 70 154 122 46
44055-190 142 34 200 144 11 197 138 11 197 138 11
44056-213 154 11 226 170 11 242 186 14 246 190 14
44057-246 190 14 246 190 14 246 190 14 246 190 14
44058-225 175 15 46 32 6 2 2 6 22 22 22
44059-158 158 158 250 250 250 253 253 253 253 253 253
44060-253 253 253 253 253 253 253 253 253 253 253 253
44061-253 253 253 253 253 253 253 253 253 253 253 253
44062-253 253 253 253 253 253 253 253 253 253 253 253
44063-253 253 253 253 253 253 253 253 253 253 253 253
44064-253 253 253 250 250 250 242 242 242 224 178 62
44065-239 182 13 236 186 11 213 154 11 46 32 6
44066- 2 2 6 2 2 6 2 2 6 2 2 6
44067- 2 2 6 2 2 6 61 42 6 225 175 15
44068-238 190 10 236 186 11 112 100 78 42 42 42
44069- 14 14 14 0 0 0 0 0 0 0 0 0
44070- 0 0 0 0 0 0 0 0 0 0 0 0
44071- 0 0 0 0 0 0 0 0 0 0 0 0
44072- 0 0 0 0 0 0 0 0 0 0 0 0
44073- 0 0 0 0 0 0 0 0 0 6 6 6
44074- 22 22 22 54 54 54 154 122 46 213 154 11
44075-226 170 11 230 174 11 226 170 11 226 170 11
44076-236 178 12 242 186 14 246 190 14 246 190 14
44077-246 190 14 246 190 14 246 190 14 246 190 14
44078-241 196 14 184 144 12 10 10 10 2 2 6
44079- 6 6 6 116 116 116 242 242 242 253 253 253
44080-253 253 253 253 253 253 253 253 253 253 253 253
44081-253 253 253 253 253 253 253 253 253 253 253 253
44082-253 253 253 253 253 253 253 253 253 253 253 253
44083-253 253 253 253 253 253 253 253 253 253 253 253
44084-253 253 253 231 231 231 198 198 198 214 170 54
44085-236 178 12 236 178 12 210 150 10 137 92 6
44086- 18 14 6 2 2 6 2 2 6 2 2 6
44087- 6 6 6 70 47 6 200 144 11 236 178 12
44088-239 182 13 239 182 13 124 112 88 58 58 58
44089- 22 22 22 6 6 6 0 0 0 0 0 0
44090- 0 0 0 0 0 0 0 0 0 0 0 0
44091- 0 0 0 0 0 0 0 0 0 0 0 0
44092- 0 0 0 0 0 0 0 0 0 0 0 0
44093- 0 0 0 0 0 0 0 0 0 10 10 10
44094- 30 30 30 70 70 70 180 133 36 226 170 11
44095-239 182 13 242 186 14 242 186 14 246 186 14
44096-246 190 14 246 190 14 246 190 14 246 190 14
44097-246 190 14 246 190 14 246 190 14 246 190 14
44098-246 190 14 232 195 16 98 70 6 2 2 6
44099- 2 2 6 2 2 6 66 66 66 221 221 221
44100-253 253 253 253 253 253 253 253 253 253 253 253
44101-253 253 253 253 253 253 253 253 253 253 253 253
44102-253 253 253 253 253 253 253 253 253 253 253 253
44103-253 253 253 253 253 253 253 253 253 253 253 253
44104-253 253 253 206 206 206 198 198 198 214 166 58
44105-230 174 11 230 174 11 216 158 10 192 133 9
44106-163 110 8 116 81 8 102 78 10 116 81 8
44107-167 114 7 197 138 11 226 170 11 239 182 13
44108-242 186 14 242 186 14 162 146 94 78 78 78
44109- 34 34 34 14 14 14 6 6 6 0 0 0
44110- 0 0 0 0 0 0 0 0 0 0 0 0
44111- 0 0 0 0 0 0 0 0 0 0 0 0
44112- 0 0 0 0 0 0 0 0 0 0 0 0
44113- 0 0 0 0 0 0 0 0 0 6 6 6
44114- 30 30 30 78 78 78 190 142 34 226 170 11
44115-239 182 13 246 190 14 246 190 14 246 190 14
44116-246 190 14 246 190 14 246 190 14 246 190 14
44117-246 190 14 246 190 14 246 190 14 246 190 14
44118-246 190 14 241 196 14 203 166 17 22 18 6
44119- 2 2 6 2 2 6 2 2 6 38 38 38
44120-218 218 218 253 253 253 253 253 253 253 253 253
44121-253 253 253 253 253 253 253 253 253 253 253 253
44122-253 253 253 253 253 253 253 253 253 253 253 253
44123-253 253 253 253 253 253 253 253 253 253 253 253
44124-250 250 250 206 206 206 198 198 198 202 162 69
44125-226 170 11 236 178 12 224 166 10 210 150 10
44126-200 144 11 197 138 11 192 133 9 197 138 11
44127-210 150 10 226 170 11 242 186 14 246 190 14
44128-246 190 14 246 186 14 225 175 15 124 112 88
44129- 62 62 62 30 30 30 14 14 14 6 6 6
44130- 0 0 0 0 0 0 0 0 0 0 0 0
44131- 0 0 0 0 0 0 0 0 0 0 0 0
44132- 0 0 0 0 0 0 0 0 0 0 0 0
44133- 0 0 0 0 0 0 0 0 0 10 10 10
44134- 30 30 30 78 78 78 174 135 50 224 166 10
44135-239 182 13 246 190 14 246 190 14 246 190 14
44136-246 190 14 246 190 14 246 190 14 246 190 14
44137-246 190 14 246 190 14 246 190 14 246 190 14
44138-246 190 14 246 190 14 241 196 14 139 102 15
44139- 2 2 6 2 2 6 2 2 6 2 2 6
44140- 78 78 78 250 250 250 253 253 253 253 253 253
44141-253 253 253 253 253 253 253 253 253 253 253 253
44142-253 253 253 253 253 253 253 253 253 253 253 253
44143-253 253 253 253 253 253 253 253 253 253 253 253
44144-250 250 250 214 214 214 198 198 198 190 150 46
44145-219 162 10 236 178 12 234 174 13 224 166 10
44146-216 158 10 213 154 11 213 154 11 216 158 10
44147-226 170 11 239 182 13 246 190 14 246 190 14
44148-246 190 14 246 190 14 242 186 14 206 162 42
44149-101 101 101 58 58 58 30 30 30 14 14 14
44150- 6 6 6 0 0 0 0 0 0 0 0 0
44151- 0 0 0 0 0 0 0 0 0 0 0 0
44152- 0 0 0 0 0 0 0 0 0 0 0 0
44153- 0 0 0 0 0 0 0 0 0 10 10 10
44154- 30 30 30 74 74 74 174 135 50 216 158 10
44155-236 178 12 246 190 14 246 190 14 246 190 14
44156-246 190 14 246 190 14 246 190 14 246 190 14
44157-246 190 14 246 190 14 246 190 14 246 190 14
44158-246 190 14 246 190 14 241 196 14 226 184 13
44159- 61 42 6 2 2 6 2 2 6 2 2 6
44160- 22 22 22 238 238 238 253 253 253 253 253 253
44161-253 253 253 253 253 253 253 253 253 253 253 253
44162-253 253 253 253 253 253 253 253 253 253 253 253
44163-253 253 253 253 253 253 253 253 253 253 253 253
44164-253 253 253 226 226 226 187 187 187 180 133 36
44165-216 158 10 236 178 12 239 182 13 236 178 12
44166-230 174 11 226 170 11 226 170 11 230 174 11
44167-236 178 12 242 186 14 246 190 14 246 190 14
44168-246 190 14 246 190 14 246 186 14 239 182 13
44169-206 162 42 106 106 106 66 66 66 34 34 34
44170- 14 14 14 6 6 6 0 0 0 0 0 0
44171- 0 0 0 0 0 0 0 0 0 0 0 0
44172- 0 0 0 0 0 0 0 0 0 0 0 0
44173- 0 0 0 0 0 0 0 0 0 6 6 6
44174- 26 26 26 70 70 70 163 133 67 213 154 11
44175-236 178 12 246 190 14 246 190 14 246 190 14
44176-246 190 14 246 190 14 246 190 14 246 190 14
44177-246 190 14 246 190 14 246 190 14 246 190 14
44178-246 190 14 246 190 14 246 190 14 241 196 14
44179-190 146 13 18 14 6 2 2 6 2 2 6
44180- 46 46 46 246 246 246 253 253 253 253 253 253
44181-253 253 253 253 253 253 253 253 253 253 253 253
44182-253 253 253 253 253 253 253 253 253 253 253 253
44183-253 253 253 253 253 253 253 253 253 253 253 253
44184-253 253 253 221 221 221 86 86 86 156 107 11
44185-216 158 10 236 178 12 242 186 14 246 186 14
44186-242 186 14 239 182 13 239 182 13 242 186 14
44187-242 186 14 246 186 14 246 190 14 246 190 14
44188-246 190 14 246 190 14 246 190 14 246 190 14
44189-242 186 14 225 175 15 142 122 72 66 66 66
44190- 30 30 30 10 10 10 0 0 0 0 0 0
44191- 0 0 0 0 0 0 0 0 0 0 0 0
44192- 0 0 0 0 0 0 0 0 0 0 0 0
44193- 0 0 0 0 0 0 0 0 0 6 6 6
44194- 26 26 26 70 70 70 163 133 67 210 150 10
44195-236 178 12 246 190 14 246 190 14 246 190 14
44196-246 190 14 246 190 14 246 190 14 246 190 14
44197-246 190 14 246 190 14 246 190 14 246 190 14
44198-246 190 14 246 190 14 246 190 14 246 190 14
44199-232 195 16 121 92 8 34 34 34 106 106 106
44200-221 221 221 253 253 253 253 253 253 253 253 253
44201-253 253 253 253 253 253 253 253 253 253 253 253
44202-253 253 253 253 253 253 253 253 253 253 253 253
44203-253 253 253 253 253 253 253 253 253 253 253 253
44204-242 242 242 82 82 82 18 14 6 163 110 8
44205-216 158 10 236 178 12 242 186 14 246 190 14
44206-246 190 14 246 190 14 246 190 14 246 190 14
44207-246 190 14 246 190 14 246 190 14 246 190 14
44208-246 190 14 246 190 14 246 190 14 246 190 14
44209-246 190 14 246 190 14 242 186 14 163 133 67
44210- 46 46 46 18 18 18 6 6 6 0 0 0
44211- 0 0 0 0 0 0 0 0 0 0 0 0
44212- 0 0 0 0 0 0 0 0 0 0 0 0
44213- 0 0 0 0 0 0 0 0 0 10 10 10
44214- 30 30 30 78 78 78 163 133 67 210 150 10
44215-236 178 12 246 186 14 246 190 14 246 190 14
44216-246 190 14 246 190 14 246 190 14 246 190 14
44217-246 190 14 246 190 14 246 190 14 246 190 14
44218-246 190 14 246 190 14 246 190 14 246 190 14
44219-241 196 14 215 174 15 190 178 144 253 253 253
44220-253 253 253 253 253 253 253 253 253 253 253 253
44221-253 253 253 253 253 253 253 253 253 253 253 253
44222-253 253 253 253 253 253 253 253 253 253 253 253
44223-253 253 253 253 253 253 253 253 253 218 218 218
44224- 58 58 58 2 2 6 22 18 6 167 114 7
44225-216 158 10 236 178 12 246 186 14 246 190 14
44226-246 190 14 246 190 14 246 190 14 246 190 14
44227-246 190 14 246 190 14 246 190 14 246 190 14
44228-246 190 14 246 190 14 246 190 14 246 190 14
44229-246 190 14 246 186 14 242 186 14 190 150 46
44230- 54 54 54 22 22 22 6 6 6 0 0 0
44231- 0 0 0 0 0 0 0 0 0 0 0 0
44232- 0 0 0 0 0 0 0 0 0 0 0 0
44233- 0 0 0 0 0 0 0 0 0 14 14 14
44234- 38 38 38 86 86 86 180 133 36 213 154 11
44235-236 178 12 246 186 14 246 190 14 246 190 14
44236-246 190 14 246 190 14 246 190 14 246 190 14
44237-246 190 14 246 190 14 246 190 14 246 190 14
44238-246 190 14 246 190 14 246 190 14 246 190 14
44239-246 190 14 232 195 16 190 146 13 214 214 214
44240-253 253 253 253 253 253 253 253 253 253 253 253
44241-253 253 253 253 253 253 253 253 253 253 253 253
44242-253 253 253 253 253 253 253 253 253 253 253 253
44243-253 253 253 250 250 250 170 170 170 26 26 26
44244- 2 2 6 2 2 6 37 26 9 163 110 8
44245-219 162 10 239 182 13 246 186 14 246 190 14
44246-246 190 14 246 190 14 246 190 14 246 190 14
44247-246 190 14 246 190 14 246 190 14 246 190 14
44248-246 190 14 246 190 14 246 190 14 246 190 14
44249-246 186 14 236 178 12 224 166 10 142 122 72
44250- 46 46 46 18 18 18 6 6 6 0 0 0
44251- 0 0 0 0 0 0 0 0 0 0 0 0
44252- 0 0 0 0 0 0 0 0 0 0 0 0
44253- 0 0 0 0 0 0 6 6 6 18 18 18
44254- 50 50 50 109 106 95 192 133 9 224 166 10
44255-242 186 14 246 190 14 246 190 14 246 190 14
44256-246 190 14 246 190 14 246 190 14 246 190 14
44257-246 190 14 246 190 14 246 190 14 246 190 14
44258-246 190 14 246 190 14 246 190 14 246 190 14
44259-242 186 14 226 184 13 210 162 10 142 110 46
44260-226 226 226 253 253 253 253 253 253 253 253 253
44261-253 253 253 253 253 253 253 253 253 253 253 253
44262-253 253 253 253 253 253 253 253 253 253 253 253
44263-198 198 198 66 66 66 2 2 6 2 2 6
44264- 2 2 6 2 2 6 50 34 6 156 107 11
44265-219 162 10 239 182 13 246 186 14 246 190 14
44266-246 190 14 246 190 14 246 190 14 246 190 14
44267-246 190 14 246 190 14 246 190 14 246 190 14
44268-246 190 14 246 190 14 246 190 14 242 186 14
44269-234 174 13 213 154 11 154 122 46 66 66 66
44270- 30 30 30 10 10 10 0 0 0 0 0 0
44271- 0 0 0 0 0 0 0 0 0 0 0 0
44272- 0 0 0 0 0 0 0 0 0 0 0 0
44273- 0 0 0 0 0 0 6 6 6 22 22 22
44274- 58 58 58 154 121 60 206 145 10 234 174 13
44275-242 186 14 246 186 14 246 190 14 246 190 14
44276-246 190 14 246 190 14 246 190 14 246 190 14
44277-246 190 14 246 190 14 246 190 14 246 190 14
44278-246 190 14 246 190 14 246 190 14 246 190 14
44279-246 186 14 236 178 12 210 162 10 163 110 8
44280- 61 42 6 138 138 138 218 218 218 250 250 250
44281-253 253 253 253 253 253 253 253 253 250 250 250
44282-242 242 242 210 210 210 144 144 144 66 66 66
44283- 6 6 6 2 2 6 2 2 6 2 2 6
44284- 2 2 6 2 2 6 61 42 6 163 110 8
44285-216 158 10 236 178 12 246 190 14 246 190 14
44286-246 190 14 246 190 14 246 190 14 246 190 14
44287-246 190 14 246 190 14 246 190 14 246 190 14
44288-246 190 14 239 182 13 230 174 11 216 158 10
44289-190 142 34 124 112 88 70 70 70 38 38 38
44290- 18 18 18 6 6 6 0 0 0 0 0 0
44291- 0 0 0 0 0 0 0 0 0 0 0 0
44292- 0 0 0 0 0 0 0 0 0 0 0 0
44293- 0 0 0 0 0 0 6 6 6 22 22 22
44294- 62 62 62 168 124 44 206 145 10 224 166 10
44295-236 178 12 239 182 13 242 186 14 242 186 14
44296-246 186 14 246 190 14 246 190 14 246 190 14
44297-246 190 14 246 190 14 246 190 14 246 190 14
44298-246 190 14 246 190 14 246 190 14 246 190 14
44299-246 190 14 236 178 12 216 158 10 175 118 6
44300- 80 54 7 2 2 6 6 6 6 30 30 30
44301- 54 54 54 62 62 62 50 50 50 38 38 38
44302- 14 14 14 2 2 6 2 2 6 2 2 6
44303- 2 2 6 2 2 6 2 2 6 2 2 6
44304- 2 2 6 6 6 6 80 54 7 167 114 7
44305-213 154 11 236 178 12 246 190 14 246 190 14
44306-246 190 14 246 190 14 246 190 14 246 190 14
44307-246 190 14 242 186 14 239 182 13 239 182 13
44308-230 174 11 210 150 10 174 135 50 124 112 88
44309- 82 82 82 54 54 54 34 34 34 18 18 18
44310- 6 6 6 0 0 0 0 0 0 0 0 0
44311- 0 0 0 0 0 0 0 0 0 0 0 0
44312- 0 0 0 0 0 0 0 0 0 0 0 0
44313- 0 0 0 0 0 0 6 6 6 18 18 18
44314- 50 50 50 158 118 36 192 133 9 200 144 11
44315-216 158 10 219 162 10 224 166 10 226 170 11
44316-230 174 11 236 178 12 239 182 13 239 182 13
44317-242 186 14 246 186 14 246 190 14 246 190 14
44318-246 190 14 246 190 14 246 190 14 246 190 14
44319-246 186 14 230 174 11 210 150 10 163 110 8
44320-104 69 6 10 10 10 2 2 6 2 2 6
44321- 2 2 6 2 2 6 2 2 6 2 2 6
44322- 2 2 6 2 2 6 2 2 6 2 2 6
44323- 2 2 6 2 2 6 2 2 6 2 2 6
44324- 2 2 6 6 6 6 91 60 6 167 114 7
44325-206 145 10 230 174 11 242 186 14 246 190 14
44326-246 190 14 246 190 14 246 186 14 242 186 14
44327-239 182 13 230 174 11 224 166 10 213 154 11
44328-180 133 36 124 112 88 86 86 86 58 58 58
44329- 38 38 38 22 22 22 10 10 10 6 6 6
44330- 0 0 0 0 0 0 0 0 0 0 0 0
44331- 0 0 0 0 0 0 0 0 0 0 0 0
44332- 0 0 0 0 0 0 0 0 0 0 0 0
44333- 0 0 0 0 0 0 0 0 0 14 14 14
44334- 34 34 34 70 70 70 138 110 50 158 118 36
44335-167 114 7 180 123 7 192 133 9 197 138 11
44336-200 144 11 206 145 10 213 154 11 219 162 10
44337-224 166 10 230 174 11 239 182 13 242 186 14
44338-246 186 14 246 186 14 246 186 14 246 186 14
44339-239 182 13 216 158 10 185 133 11 152 99 6
44340-104 69 6 18 14 6 2 2 6 2 2 6
44341- 2 2 6 2 2 6 2 2 6 2 2 6
44342- 2 2 6 2 2 6 2 2 6 2 2 6
44343- 2 2 6 2 2 6 2 2 6 2 2 6
44344- 2 2 6 6 6 6 80 54 7 152 99 6
44345-192 133 9 219 162 10 236 178 12 239 182 13
44346-246 186 14 242 186 14 239 182 13 236 178 12
44347-224 166 10 206 145 10 192 133 9 154 121 60
44348- 94 94 94 62 62 62 42 42 42 22 22 22
44349- 14 14 14 6 6 6 0 0 0 0 0 0
44350- 0 0 0 0 0 0 0 0 0 0 0 0
44351- 0 0 0 0 0 0 0 0 0 0 0 0
44352- 0 0 0 0 0 0 0 0 0 0 0 0
44353- 0 0 0 0 0 0 0 0 0 6 6 6
44354- 18 18 18 34 34 34 58 58 58 78 78 78
44355-101 98 89 124 112 88 142 110 46 156 107 11
44356-163 110 8 167 114 7 175 118 6 180 123 7
44357-185 133 11 197 138 11 210 150 10 219 162 10
44358-226 170 11 236 178 12 236 178 12 234 174 13
44359-219 162 10 197 138 11 163 110 8 130 83 6
44360- 91 60 6 10 10 10 2 2 6 2 2 6
44361- 18 18 18 38 38 38 38 38 38 38 38 38
44362- 38 38 38 38 38 38 38 38 38 38 38 38
44363- 38 38 38 38 38 38 26 26 26 2 2 6
44364- 2 2 6 6 6 6 70 47 6 137 92 6
44365-175 118 6 200 144 11 219 162 10 230 174 11
44366-234 174 13 230 174 11 219 162 10 210 150 10
44367-192 133 9 163 110 8 124 112 88 82 82 82
44368- 50 50 50 30 30 30 14 14 14 6 6 6
44369- 0 0 0 0 0 0 0 0 0 0 0 0
44370- 0 0 0 0 0 0 0 0 0 0 0 0
44371- 0 0 0 0 0 0 0 0 0 0 0 0
44372- 0 0 0 0 0 0 0 0 0 0 0 0
44373- 0 0 0 0 0 0 0 0 0 0 0 0
44374- 6 6 6 14 14 14 22 22 22 34 34 34
44375- 42 42 42 58 58 58 74 74 74 86 86 86
44376-101 98 89 122 102 70 130 98 46 121 87 25
44377-137 92 6 152 99 6 163 110 8 180 123 7
44378-185 133 11 197 138 11 206 145 10 200 144 11
44379-180 123 7 156 107 11 130 83 6 104 69 6
44380- 50 34 6 54 54 54 110 110 110 101 98 89
44381- 86 86 86 82 82 82 78 78 78 78 78 78
44382- 78 78 78 78 78 78 78 78 78 78 78 78
44383- 78 78 78 82 82 82 86 86 86 94 94 94
44384-106 106 106 101 101 101 86 66 34 124 80 6
44385-156 107 11 180 123 7 192 133 9 200 144 11
44386-206 145 10 200 144 11 192 133 9 175 118 6
44387-139 102 15 109 106 95 70 70 70 42 42 42
44388- 22 22 22 10 10 10 0 0 0 0 0 0
44389- 0 0 0 0 0 0 0 0 0 0 0 0
44390- 0 0 0 0 0 0 0 0 0 0 0 0
44391- 0 0 0 0 0 0 0 0 0 0 0 0
44392- 0 0 0 0 0 0 0 0 0 0 0 0
44393- 0 0 0 0 0 0 0 0 0 0 0 0
44394- 0 0 0 0 0 0 6 6 6 10 10 10
44395- 14 14 14 22 22 22 30 30 30 38 38 38
44396- 50 50 50 62 62 62 74 74 74 90 90 90
44397-101 98 89 112 100 78 121 87 25 124 80 6
44398-137 92 6 152 99 6 152 99 6 152 99 6
44399-138 86 6 124 80 6 98 70 6 86 66 30
44400-101 98 89 82 82 82 58 58 58 46 46 46
44401- 38 38 38 34 34 34 34 34 34 34 34 34
44402- 34 34 34 34 34 34 34 34 34 34 34 34
44403- 34 34 34 34 34 34 38 38 38 42 42 42
44404- 54 54 54 82 82 82 94 86 76 91 60 6
44405-134 86 6 156 107 11 167 114 7 175 118 6
44406-175 118 6 167 114 7 152 99 6 121 87 25
44407-101 98 89 62 62 62 34 34 34 18 18 18
44408- 6 6 6 0 0 0 0 0 0 0 0 0
44409- 0 0 0 0 0 0 0 0 0 0 0 0
44410- 0 0 0 0 0 0 0 0 0 0 0 0
44411- 0 0 0 0 0 0 0 0 0 0 0 0
44412- 0 0 0 0 0 0 0 0 0 0 0 0
44413- 0 0 0 0 0 0 0 0 0 0 0 0
44414- 0 0 0 0 0 0 0 0 0 0 0 0
44415- 0 0 0 6 6 6 6 6 6 10 10 10
44416- 18 18 18 22 22 22 30 30 30 42 42 42
44417- 50 50 50 66 66 66 86 86 86 101 98 89
44418-106 86 58 98 70 6 104 69 6 104 69 6
44419-104 69 6 91 60 6 82 62 34 90 90 90
44420- 62 62 62 38 38 38 22 22 22 14 14 14
44421- 10 10 10 10 10 10 10 10 10 10 10 10
44422- 10 10 10 10 10 10 6 6 6 10 10 10
44423- 10 10 10 10 10 10 10 10 10 14 14 14
44424- 22 22 22 42 42 42 70 70 70 89 81 66
44425- 80 54 7 104 69 6 124 80 6 137 92 6
44426-134 86 6 116 81 8 100 82 52 86 86 86
44427- 58 58 58 30 30 30 14 14 14 6 6 6
44428- 0 0 0 0 0 0 0 0 0 0 0 0
44429- 0 0 0 0 0 0 0 0 0 0 0 0
44430- 0 0 0 0 0 0 0 0 0 0 0 0
44431- 0 0 0 0 0 0 0 0 0 0 0 0
44432- 0 0 0 0 0 0 0 0 0 0 0 0
44433- 0 0 0 0 0 0 0 0 0 0 0 0
44434- 0 0 0 0 0 0 0 0 0 0 0 0
44435- 0 0 0 0 0 0 0 0 0 0 0 0
44436- 0 0 0 6 6 6 10 10 10 14 14 14
44437- 18 18 18 26 26 26 38 38 38 54 54 54
44438- 70 70 70 86 86 86 94 86 76 89 81 66
44439- 89 81 66 86 86 86 74 74 74 50 50 50
44440- 30 30 30 14 14 14 6 6 6 0 0 0
44441- 0 0 0 0 0 0 0 0 0 0 0 0
44442- 0 0 0 0 0 0 0 0 0 0 0 0
44443- 0 0 0 0 0 0 0 0 0 0 0 0
44444- 6 6 6 18 18 18 34 34 34 58 58 58
44445- 82 82 82 89 81 66 89 81 66 89 81 66
44446- 94 86 66 94 86 76 74 74 74 50 50 50
44447- 26 26 26 14 14 14 6 6 6 0 0 0
44448- 0 0 0 0 0 0 0 0 0 0 0 0
44449- 0 0 0 0 0 0 0 0 0 0 0 0
44450- 0 0 0 0 0 0 0 0 0 0 0 0
44451- 0 0 0 0 0 0 0 0 0 0 0 0
44452- 0 0 0 0 0 0 0 0 0 0 0 0
44453- 0 0 0 0 0 0 0 0 0 0 0 0
44454- 0 0 0 0 0 0 0 0 0 0 0 0
44455- 0 0 0 0 0 0 0 0 0 0 0 0
44456- 0 0 0 0 0 0 0 0 0 0 0 0
44457- 6 6 6 6 6 6 14 14 14 18 18 18
44458- 30 30 30 38 38 38 46 46 46 54 54 54
44459- 50 50 50 42 42 42 30 30 30 18 18 18
44460- 10 10 10 0 0 0 0 0 0 0 0 0
44461- 0 0 0 0 0 0 0 0 0 0 0 0
44462- 0 0 0 0 0 0 0 0 0 0 0 0
44463- 0 0 0 0 0 0 0 0 0 0 0 0
44464- 0 0 0 6 6 6 14 14 14 26 26 26
44465- 38 38 38 50 50 50 58 58 58 58 58 58
44466- 54 54 54 42 42 42 30 30 30 18 18 18
44467- 10 10 10 0 0 0 0 0 0 0 0 0
44468- 0 0 0 0 0 0 0 0 0 0 0 0
44469- 0 0 0 0 0 0 0 0 0 0 0 0
44470- 0 0 0 0 0 0 0 0 0 0 0 0
44471- 0 0 0 0 0 0 0 0 0 0 0 0
44472- 0 0 0 0 0 0 0 0 0 0 0 0
44473- 0 0 0 0 0 0 0 0 0 0 0 0
44474- 0 0 0 0 0 0 0 0 0 0 0 0
44475- 0 0 0 0 0 0 0 0 0 0 0 0
44476- 0 0 0 0 0 0 0 0 0 0 0 0
44477- 0 0 0 0 0 0 0 0 0 6 6 6
44478- 6 6 6 10 10 10 14 14 14 18 18 18
44479- 18 18 18 14 14 14 10 10 10 6 6 6
44480- 0 0 0 0 0 0 0 0 0 0 0 0
44481- 0 0 0 0 0 0 0 0 0 0 0 0
44482- 0 0 0 0 0 0 0 0 0 0 0 0
44483- 0 0 0 0 0 0 0 0 0 0 0 0
44484- 0 0 0 0 0 0 0 0 0 6 6 6
44485- 14 14 14 18 18 18 22 22 22 22 22 22
44486- 18 18 18 14 14 14 10 10 10 6 6 6
44487- 0 0 0 0 0 0 0 0 0 0 0 0
44488- 0 0 0 0 0 0 0 0 0 0 0 0
44489- 0 0 0 0 0 0 0 0 0 0 0 0
44490- 0 0 0 0 0 0 0 0 0 0 0 0
44491- 0 0 0 0 0 0 0 0 0 0 0 0
44492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44505+4 4 4 4 4 4
44506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44519+4 4 4 4 4 4
44520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44533+4 4 4 4 4 4
44534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44547+4 4 4 4 4 4
44548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44561+4 4 4 4 4 4
44562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44575+4 4 4 4 4 4
44576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44580+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44581+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44585+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44586+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44587+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44589+4 4 4 4 4 4
44590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44594+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44595+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44596+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44599+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
44600+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
44601+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
44602+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44603+4 4 4 4 4 4
44604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44608+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
44609+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
44610+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44613+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
44614+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
44615+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
44616+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
44617+4 4 4 4 4 4
44618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44621+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
44622+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
44623+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
44624+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
44625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44626+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44627+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
44628+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
44629+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
44630+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
44631+4 4 4 4 4 4
44632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44635+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
44636+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
44637+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
44638+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
44639+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44640+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
44641+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
44642+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
44643+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
44644+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
44645+4 4 4 4 4 4
44646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44649+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
44650+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
44651+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
44652+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
44653+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
44654+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
44655+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
44656+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
44657+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
44658+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
44659+4 4 4 4 4 4
44660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44662+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
44663+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
44664+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
44665+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
44666+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
44667+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
44668+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
44669+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
44670+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
44671+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
44672+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
44673+4 4 4 4 4 4
44674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44676+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
44677+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
44678+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
44679+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
44680+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
44681+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
44682+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
44683+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
44684+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
44685+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
44686+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
44687+4 4 4 4 4 4
44688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44690+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
44691+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
44692+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
44693+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
44694+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
44695+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
44696+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
44697+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
44698+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
44699+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
44700+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44701+4 4 4 4 4 4
44702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44704+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
44705+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
44706+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
44707+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
44708+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
44709+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
44710+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
44711+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
44712+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
44713+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
44714+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
44715+4 4 4 4 4 4
44716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44717+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
44718+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
44719+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
44720+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
44721+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
44722+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
44723+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
44724+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
44725+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
44726+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
44727+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
44728+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
44729+4 4 4 4 4 4
44730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44731+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
44732+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
44733+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
44734+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
44735+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
44736+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
44737+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
44738+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
44739+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
44740+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
44741+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
44742+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
44743+0 0 0 4 4 4
44744+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44745+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
44746+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
44747+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
44748+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
44749+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
44750+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
44751+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
44752+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
44753+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
44754+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
44755+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
44756+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
44757+2 0 0 0 0 0
44758+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
44759+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
44760+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
44761+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
44762+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
44763+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
44764+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
44765+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
44766+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
44767+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
44768+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
44769+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
44770+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
44771+37 38 37 0 0 0
44772+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44773+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
44774+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
44775+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
44776+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
44777+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
44778+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
44779+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
44780+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
44781+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
44782+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
44783+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
44784+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
44785+85 115 134 4 0 0
44786+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
44787+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
44788+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
44789+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
44790+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
44791+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
44792+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
44793+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
44794+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
44795+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
44796+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
44797+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
44798+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
44799+60 73 81 4 0 0
44800+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
44801+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
44802+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
44803+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
44804+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
44805+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
44806+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
44807+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
44808+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
44809+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
44810+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
44811+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
44812+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
44813+16 19 21 4 0 0
44814+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
44815+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
44816+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
44817+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
44818+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
44819+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
44820+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
44821+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
44822+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
44823+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
44824+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
44825+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
44826+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
44827+4 0 0 4 3 3
44828+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
44829+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
44830+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
44831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
44832+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
44833+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
44834+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
44835+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
44836+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
44837+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
44838+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
44839+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
44840+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
44841+3 2 2 4 4 4
44842+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
44843+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
44844+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
44845+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44846+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
44847+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
44848+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
44849+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
44850+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
44851+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
44852+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
44853+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
44854+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
44855+4 4 4 4 4 4
44856+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
44857+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
44858+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
44859+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
44860+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
44861+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
44862+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
44863+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
44864+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
44865+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
44866+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
44867+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
44868+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
44869+4 4 4 4 4 4
44870+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
44871+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
44872+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
44873+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
44874+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
44875+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
44876+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
44877+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
44878+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
44879+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
44880+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
44881+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
44882+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
44883+5 5 5 5 5 5
44884+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
44885+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
44886+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
44887+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
44888+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
44889+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44890+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
44891+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
44892+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
44893+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
44894+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
44895+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
44896+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
44897+5 5 5 4 4 4
44898+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
44899+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
44900+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
44901+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
44902+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44903+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
44904+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
44905+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
44906+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
44907+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
44908+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
44909+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
44910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44911+4 4 4 4 4 4
44912+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
44913+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
44914+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
44915+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
44916+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
44917+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44918+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44919+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
44920+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
44921+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
44922+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
44923+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
44924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44925+4 4 4 4 4 4
44926+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
44927+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
44928+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
44929+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
44930+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44931+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
44932+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
44933+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
44934+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
44935+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
44936+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
44937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44939+4 4 4 4 4 4
44940+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
44941+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
44942+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
44943+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
44944+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44945+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44946+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44947+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
44948+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
44949+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
44950+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
44951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44953+4 4 4 4 4 4
44954+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
44955+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
44956+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
44957+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
44958+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44959+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
44960+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
44961+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
44962+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
44963+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
44964+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
44965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44967+4 4 4 4 4 4
44968+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
44969+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
44970+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
44971+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
44972+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
44973+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
44974+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
44975+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
44976+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
44977+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
44978+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
44979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44981+4 4 4 4 4 4
44982+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
44983+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
44984+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
44985+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
44986+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
44987+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
44988+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
44989+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
44990+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
44991+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
44992+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
44993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44995+4 4 4 4 4 4
44996+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
44997+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
44998+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
44999+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45000+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45001+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45002+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45003+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45004+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45005+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45006+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45009+4 4 4 4 4 4
45010+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45011+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45012+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45013+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45014+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45015+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45016+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45017+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45018+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45019+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45020+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45023+4 4 4 4 4 4
45024+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45025+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45026+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45027+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45028+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45029+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45030+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45031+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45032+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45033+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45034+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45037+4 4 4 4 4 4
45038+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45039+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45040+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45041+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45042+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45043+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45044+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45045+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45046+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45047+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45048+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45051+4 4 4 4 4 4
45052+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45053+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45054+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45055+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45056+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45057+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45058+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45059+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45060+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45061+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45062+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45065+4 4 4 4 4 4
45066+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45067+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45068+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45069+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45070+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45071+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45072+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45073+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45074+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45075+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45076+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45079+4 4 4 4 4 4
45080+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45081+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45082+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45083+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45084+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45085+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45086+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45087+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45088+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45089+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45090+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45093+4 4 4 4 4 4
45094+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45095+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45096+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45097+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45098+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45099+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45100+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45101+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45102+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45103+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45104+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45107+4 4 4 4 4 4
45108+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45109+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45110+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45111+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45112+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45113+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45114+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45115+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45116+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45117+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45118+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45121+4 4 4 4 4 4
45122+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45123+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45124+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45125+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45126+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45127+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45128+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45129+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45130+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45131+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45132+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45135+4 4 4 4 4 4
45136+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45137+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45138+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45139+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45140+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45141+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45142+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45143+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45144+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45145+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45146+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45149+4 4 4 4 4 4
45150+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45151+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45152+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45153+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45154+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45155+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45156+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45157+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45158+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45159+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45160+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45163+4 4 4 4 4 4
45164+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45165+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45166+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45167+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45168+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45169+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45170+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45171+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45172+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45173+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45174+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45177+4 4 4 4 4 4
45178+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45179+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45180+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45181+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45182+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45183+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45184+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45185+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45186+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45187+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45188+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45191+4 4 4 4 4 4
45192+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45193+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45194+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45195+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45196+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45197+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45198+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45199+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45200+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45201+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45202+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45205+4 4 4 4 4 4
45206+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45207+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45208+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45209+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45210+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45211+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45212+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45213+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45214+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45215+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45216+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45219+4 4 4 4 4 4
45220+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45221+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45222+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45223+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45224+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45225+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45226+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45227+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45228+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45229+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45230+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45233+4 4 4 4 4 4
45234+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45235+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45236+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45237+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45238+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45239+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45240+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45241+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45242+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45243+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45244+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45247+4 4 4 4 4 4
45248+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45249+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45250+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45251+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45252+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45253+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45254+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45255+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45256+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45257+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45258+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45261+4 4 4 4 4 4
45262+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45263+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45264+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45265+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45266+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45267+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45268+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45269+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45270+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45271+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45272+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45275+4 4 4 4 4 4
45276+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45277+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45278+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45279+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45280+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45281+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45282+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45283+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45284+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45285+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45286+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45289+4 4 4 4 4 4
45290+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45291+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45292+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45293+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45294+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45295+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45296+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45297+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45298+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45299+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45300+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45303+4 4 4 4 4 4
45304+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45305+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45306+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45307+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45308+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45309+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45310+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45311+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45312+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45313+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45317+4 4 4 4 4 4
45318+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45319+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45320+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45321+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45322+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45323+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45324+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45325+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45326+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45327+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45331+4 4 4 4 4 4
45332+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45333+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45334+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45335+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45336+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45337+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45338+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45339+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45340+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45341+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45345+4 4 4 4 4 4
45346+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45347+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45348+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45349+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45350+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45351+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45352+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45353+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45354+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45355+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45359+4 4 4 4 4 4
45360+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45361+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45362+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45363+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45364+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45365+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45366+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45367+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45368+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45373+4 4 4 4 4 4
45374+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45375+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45376+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45377+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45378+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45379+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45380+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45381+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45382+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45387+4 4 4 4 4 4
45388+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45389+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45390+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45391+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45392+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45393+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45394+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45395+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45396+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45401+4 4 4 4 4 4
45402+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45403+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45404+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45405+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45406+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45407+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45408+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45409+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45415+4 4 4 4 4 4
45416+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45417+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45418+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45419+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45420+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45421+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45422+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45423+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45429+4 4 4 4 4 4
45430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45431+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45432+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45433+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45434+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45435+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45436+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45437+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45443+4 4 4 4 4 4
45444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45445+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45446+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45447+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45448+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45449+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45450+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45451+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45457+4 4 4 4 4 4
45458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45459+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45460+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45461+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45462+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45463+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45464+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45465+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45471+4 4 4 4 4 4
45472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45474+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45475+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45476+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45477+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45478+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45479+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45485+4 4 4 4 4 4
45486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45489+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45490+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45491+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45492+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45499+4 4 4 4 4 4
45500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45503+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45504+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45505+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45506+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45513+4 4 4 4 4 4
45514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45517+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45518+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45519+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45520+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45527+4 4 4 4 4 4
45528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45531+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45532+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45533+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45534+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45541+4 4 4 4 4 4
45542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45546+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45547+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45548+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45555+4 4 4 4 4 4
45556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45560+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45561+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45562+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45569+4 4 4 4 4 4
45570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45574+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45575+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45576+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45583+4 4 4 4 4 4
45584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45588+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45589+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45597+4 4 4 4 4 4
45598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45602+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45603+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
45604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45611+4 4 4 4 4 4
45612diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
45613index 443e3c8..c443d6a 100644
45614--- a/drivers/video/nvidia/nv_backlight.c
45615+++ b/drivers/video/nvidia/nv_backlight.c
45616@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
45617 return bd->props.brightness;
45618 }
45619
45620-static struct backlight_ops nvidia_bl_ops = {
45621+static const struct backlight_ops nvidia_bl_ops = {
45622 .get_brightness = nvidia_bl_get_brightness,
45623 .update_status = nvidia_bl_update_status,
45624 };
45625diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
45626index d94c57f..912984c 100644
45627--- a/drivers/video/riva/fbdev.c
45628+++ b/drivers/video/riva/fbdev.c
45629@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
45630 return bd->props.brightness;
45631 }
45632
45633-static struct backlight_ops riva_bl_ops = {
45634+static const struct backlight_ops riva_bl_ops = {
45635 .get_brightness = riva_bl_get_brightness,
45636 .update_status = riva_bl_update_status,
45637 };
45638diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
45639index 54fbb29..2c108fc 100644
45640--- a/drivers/video/uvesafb.c
45641+++ b/drivers/video/uvesafb.c
45642@@ -18,6 +18,7 @@
45643 #include <linux/fb.h>
45644 #include <linux/io.h>
45645 #include <linux/mutex.h>
45646+#include <linux/moduleloader.h>
45647 #include <video/edid.h>
45648 #include <video/uvesafb.h>
45649 #ifdef CONFIG_X86
45650@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
45651 NULL,
45652 };
45653
45654- return call_usermodehelper(v86d_path, argv, envp, 1);
45655+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
45656 }
45657
45658 /*
45659@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
45660 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
45661 par->pmi_setpal = par->ypan = 0;
45662 } else {
45663+
45664+#ifdef CONFIG_PAX_KERNEXEC
45665+#ifdef CONFIG_MODULES
45666+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
45667+#endif
45668+ if (!par->pmi_code) {
45669+ par->pmi_setpal = par->ypan = 0;
45670+ return 0;
45671+ }
45672+#endif
45673+
45674 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
45675 + task->t.regs.edi);
45676+
45677+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45678+ pax_open_kernel();
45679+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
45680+ pax_close_kernel();
45681+
45682+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
45683+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
45684+#else
45685 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
45686 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
45687+#endif
45688+
45689 printk(KERN_INFO "uvesafb: protected mode interface info at "
45690 "%04x:%04x\n",
45691 (u16)task->t.regs.es, (u16)task->t.regs.edi);
45692@@ -1799,6 +1822,11 @@ out:
45693 if (par->vbe_modes)
45694 kfree(par->vbe_modes);
45695
45696+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45697+ if (par->pmi_code)
45698+ module_free_exec(NULL, par->pmi_code);
45699+#endif
45700+
45701 framebuffer_release(info);
45702 return err;
45703 }
45704@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
45705 kfree(par->vbe_state_orig);
45706 if (par->vbe_state_saved)
45707 kfree(par->vbe_state_saved);
45708+
45709+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45710+ if (par->pmi_code)
45711+ module_free_exec(NULL, par->pmi_code);
45712+#endif
45713+
45714 }
45715
45716 framebuffer_release(info);
45717diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
45718index bd37ee1..cb827e8 100644
45719--- a/drivers/video/vesafb.c
45720+++ b/drivers/video/vesafb.c
45721@@ -9,6 +9,7 @@
45722 */
45723
45724 #include <linux/module.h>
45725+#include <linux/moduleloader.h>
45726 #include <linux/kernel.h>
45727 #include <linux/errno.h>
45728 #include <linux/string.h>
45729@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
45730 static int vram_total __initdata; /* Set total amount of memory */
45731 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
45732 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
45733-static void (*pmi_start)(void) __read_mostly;
45734-static void (*pmi_pal) (void) __read_mostly;
45735+static void (*pmi_start)(void) __read_only;
45736+static void (*pmi_pal) (void) __read_only;
45737 static int depth __read_mostly;
45738 static int vga_compat __read_mostly;
45739 /* --------------------------------------------------------------------- */
45740@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
45741 unsigned int size_vmode;
45742 unsigned int size_remap;
45743 unsigned int size_total;
45744+ void *pmi_code = NULL;
45745
45746 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
45747 return -ENODEV;
45748@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
45749 size_remap = size_total;
45750 vesafb_fix.smem_len = size_remap;
45751
45752-#ifndef __i386__
45753- screen_info.vesapm_seg = 0;
45754-#endif
45755-
45756 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
45757 printk(KERN_WARNING
45758 "vesafb: cannot reserve video memory at 0x%lx\n",
45759@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
45760 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
45761 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
45762
45763+#ifdef __i386__
45764+
45765+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45766+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
45767+ if (!pmi_code)
45768+#elif !defined(CONFIG_PAX_KERNEXEC)
45769+ if (0)
45770+#endif
45771+
45772+#endif
45773+ screen_info.vesapm_seg = 0;
45774+
45775 if (screen_info.vesapm_seg) {
45776- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
45777- screen_info.vesapm_seg,screen_info.vesapm_off);
45778+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
45779+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
45780 }
45781
45782 if (screen_info.vesapm_seg < 0xc000)
45783@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
45784
45785 if (ypan || pmi_setpal) {
45786 unsigned short *pmi_base;
45787+
45788 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
45789- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
45790- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
45791+
45792+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45793+ pax_open_kernel();
45794+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
45795+#else
45796+ pmi_code = pmi_base;
45797+#endif
45798+
45799+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
45800+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
45801+
45802+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45803+ pmi_start = ktva_ktla(pmi_start);
45804+ pmi_pal = ktva_ktla(pmi_pal);
45805+ pax_close_kernel();
45806+#endif
45807+
45808 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
45809 if (pmi_base[3]) {
45810 printk(KERN_INFO "vesafb: pmi: ports = ");
45811@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
45812 info->node, info->fix.id);
45813 return 0;
45814 err:
45815+
45816+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45817+ module_free_exec(NULL, pmi_code);
45818+#endif
45819+
45820 if (info->screen_base)
45821 iounmap(info->screen_base);
45822 framebuffer_release(info);
45823diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
45824index 88a60e0..6783cc2 100644
45825--- a/drivers/xen/sys-hypervisor.c
45826+++ b/drivers/xen/sys-hypervisor.c
45827@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
45828 return 0;
45829 }
45830
45831-static struct sysfs_ops hyp_sysfs_ops = {
45832+static const struct sysfs_ops hyp_sysfs_ops = {
45833 .show = hyp_sysfs_show,
45834 .store = hyp_sysfs_store,
45835 };
45836diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
45837index 18f74ec..3227009 100644
45838--- a/fs/9p/vfs_inode.c
45839+++ b/fs/9p/vfs_inode.c
45840@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
45841 static void
45842 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45843 {
45844- char *s = nd_get_link(nd);
45845+ const char *s = nd_get_link(nd);
45846
45847 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
45848 IS_ERR(s) ? "<error>" : s);
45849diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
45850index bb4cc5b..df5eaa0 100644
45851--- a/fs/Kconfig.binfmt
45852+++ b/fs/Kconfig.binfmt
45853@@ -86,7 +86,7 @@ config HAVE_AOUT
45854
45855 config BINFMT_AOUT
45856 tristate "Kernel support for a.out and ECOFF binaries"
45857- depends on HAVE_AOUT
45858+ depends on HAVE_AOUT && BROKEN
45859 ---help---
45860 A.out (Assembler.OUTput) is a set of formats for libraries and
45861 executables used in the earliest versions of UNIX. Linux used
45862diff --git a/fs/aio.c b/fs/aio.c
45863index 22a19ad..d484e5b 100644
45864--- a/fs/aio.c
45865+++ b/fs/aio.c
45866@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
45867 size += sizeof(struct io_event) * nr_events;
45868 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
45869
45870- if (nr_pages < 0)
45871+ if (nr_pages <= 0)
45872 return -EINVAL;
45873
45874 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
45875@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
45876 struct aio_timeout to;
45877 int retry = 0;
45878
45879+ pax_track_stack();
45880+
45881 /* needed to zero any padding within an entry (there shouldn't be
45882 * any, but C is fun!
45883 */
45884@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
45885 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
45886 {
45887 ssize_t ret;
45888+ struct iovec iovstack;
45889
45890 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
45891 kiocb->ki_nbytes, 1,
45892- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
45893+ &iovstack, &kiocb->ki_iovec);
45894 if (ret < 0)
45895 goto out;
45896
45897+ if (kiocb->ki_iovec == &iovstack) {
45898+ kiocb->ki_inline_vec = iovstack;
45899+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
45900+ }
45901 kiocb->ki_nr_segs = kiocb->ki_nbytes;
45902 kiocb->ki_cur_seg = 0;
45903 /* ki_nbytes/left now reflect bytes instead of segs */
45904diff --git a/fs/attr.c b/fs/attr.c
45905index 96d394b..33cf5b4 100644
45906--- a/fs/attr.c
45907+++ b/fs/attr.c
45908@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
45909 unsigned long limit;
45910
45911 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
45912+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
45913 if (limit != RLIM_INFINITY && offset > limit)
45914 goto out_sig;
45915 if (offset > inode->i_sb->s_maxbytes)
45916diff --git a/fs/autofs/root.c b/fs/autofs/root.c
45917index 4a1401c..05eb5ca 100644
45918--- a/fs/autofs/root.c
45919+++ b/fs/autofs/root.c
45920@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
45921 set_bit(n,sbi->symlink_bitmap);
45922 sl = &sbi->symlink[n];
45923 sl->len = strlen(symname);
45924- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
45925+ slsize = sl->len+1;
45926+ sl->data = kmalloc(slsize, GFP_KERNEL);
45927 if (!sl->data) {
45928 clear_bit(n,sbi->symlink_bitmap);
45929 unlock_kernel();
45930diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
45931index b4ea829..e63ef18 100644
45932--- a/fs/autofs4/symlink.c
45933+++ b/fs/autofs4/symlink.c
45934@@ -15,7 +15,7 @@
45935 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
45936 {
45937 struct autofs_info *ino = autofs4_dentry_ino(dentry);
45938- nd_set_link(nd, (char *)ino->u.symlink);
45939+ nd_set_link(nd, ino->u.symlink);
45940 return NULL;
45941 }
45942
45943diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
45944index 2341375..df9d1c2 100644
45945--- a/fs/autofs4/waitq.c
45946+++ b/fs/autofs4/waitq.c
45947@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
45948 {
45949 unsigned long sigpipe, flags;
45950 mm_segment_t fs;
45951- const char *data = (const char *)addr;
45952+ const char __user *data = (const char __force_user *)addr;
45953 ssize_t wr = 0;
45954
45955 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
45956diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
45957index 9158c07..3f06659 100644
45958--- a/fs/befs/linuxvfs.c
45959+++ b/fs/befs/linuxvfs.c
45960@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45961 {
45962 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
45963 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
45964- char *link = nd_get_link(nd);
45965+ const char *link = nd_get_link(nd);
45966 if (!IS_ERR(link))
45967 kfree(link);
45968 }
45969diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
45970index 0133b5a..b3baa9f 100644
45971--- a/fs/binfmt_aout.c
45972+++ b/fs/binfmt_aout.c
45973@@ -16,6 +16,7 @@
45974 #include <linux/string.h>
45975 #include <linux/fs.h>
45976 #include <linux/file.h>
45977+#include <linux/security.h>
45978 #include <linux/stat.h>
45979 #include <linux/fcntl.h>
45980 #include <linux/ptrace.h>
45981@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
45982 #endif
45983 # define START_STACK(u) (u.start_stack)
45984
45985+ memset(&dump, 0, sizeof(dump));
45986+
45987 fs = get_fs();
45988 set_fs(KERNEL_DS);
45989 has_dumped = 1;
45990@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
45991
45992 /* If the size of the dump file exceeds the rlimit, then see what would happen
45993 if we wrote the stack, but not the data area. */
45994+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
45995 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
45996 dump.u_dsize = 0;
45997
45998 /* Make sure we have enough room to write the stack and data areas. */
45999+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46000 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46001 dump.u_ssize = 0;
46002
46003@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46004 dump_size = dump.u_ssize << PAGE_SHIFT;
46005 DUMP_WRITE(dump_start,dump_size);
46006 }
46007-/* Finally dump the task struct. Not be used by gdb, but could be useful */
46008- set_fs(KERNEL_DS);
46009- DUMP_WRITE(current,sizeof(*current));
46010+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46011 end_coredump:
46012 set_fs(fs);
46013 return has_dumped;
46014@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46015 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46016 if (rlim >= RLIM_INFINITY)
46017 rlim = ~0;
46018+
46019+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46020 if (ex.a_data + ex.a_bss > rlim)
46021 return -ENOMEM;
46022
46023@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46024 install_exec_creds(bprm);
46025 current->flags &= ~PF_FORKNOEXEC;
46026
46027+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46028+ current->mm->pax_flags = 0UL;
46029+#endif
46030+
46031+#ifdef CONFIG_PAX_PAGEEXEC
46032+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46033+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46034+
46035+#ifdef CONFIG_PAX_EMUTRAMP
46036+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46037+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46038+#endif
46039+
46040+#ifdef CONFIG_PAX_MPROTECT
46041+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46042+ current->mm->pax_flags |= MF_PAX_MPROTECT;
46043+#endif
46044+
46045+ }
46046+#endif
46047+
46048 if (N_MAGIC(ex) == OMAGIC) {
46049 unsigned long text_addr, map_size;
46050 loff_t pos;
46051@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46052
46053 down_write(&current->mm->mmap_sem);
46054 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46055- PROT_READ | PROT_WRITE | PROT_EXEC,
46056+ PROT_READ | PROT_WRITE,
46057 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46058 fd_offset + ex.a_text);
46059 up_write(&current->mm->mmap_sem);
46060diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46061index 1ed37ba..32cc555 100644
46062--- a/fs/binfmt_elf.c
46063+++ b/fs/binfmt_elf.c
46064@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46065 #define elf_core_dump NULL
46066 #endif
46067
46068+#ifdef CONFIG_PAX_MPROTECT
46069+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46070+#endif
46071+
46072 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46073 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46074 #else
46075@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format = {
46076 .load_binary = load_elf_binary,
46077 .load_shlib = load_elf_library,
46078 .core_dump = elf_core_dump,
46079+
46080+#ifdef CONFIG_PAX_MPROTECT
46081+ .handle_mprotect= elf_handle_mprotect,
46082+#endif
46083+
46084 .min_coredump = ELF_EXEC_PAGESIZE,
46085 .hasvdso = 1
46086 };
46087@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = {
46088
46089 static int set_brk(unsigned long start, unsigned long end)
46090 {
46091+ unsigned long e = end;
46092+
46093 start = ELF_PAGEALIGN(start);
46094 end = ELF_PAGEALIGN(end);
46095 if (end > start) {
46096@@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
46097 if (BAD_ADDR(addr))
46098 return addr;
46099 }
46100- current->mm->start_brk = current->mm->brk = end;
46101+ current->mm->start_brk = current->mm->brk = e;
46102 return 0;
46103 }
46104
46105@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46106 elf_addr_t __user *u_rand_bytes;
46107 const char *k_platform = ELF_PLATFORM;
46108 const char *k_base_platform = ELF_BASE_PLATFORM;
46109- unsigned char k_rand_bytes[16];
46110+ u32 k_rand_bytes[4];
46111 int items;
46112 elf_addr_t *elf_info;
46113 int ei_index = 0;
46114 const struct cred *cred = current_cred();
46115 struct vm_area_struct *vma;
46116+ unsigned long saved_auxv[AT_VECTOR_SIZE];
46117+
46118+ pax_track_stack();
46119
46120 /*
46121 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46122@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46123 * Generate 16 random bytes for userspace PRNG seeding.
46124 */
46125 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46126- u_rand_bytes = (elf_addr_t __user *)
46127- STACK_ALLOC(p, sizeof(k_rand_bytes));
46128+ srandom32(k_rand_bytes[0] ^ random32());
46129+ srandom32(k_rand_bytes[1] ^ random32());
46130+ srandom32(k_rand_bytes[2] ^ random32());
46131+ srandom32(k_rand_bytes[3] ^ random32());
46132+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
46133+ u_rand_bytes = (elf_addr_t __user *) p;
46134 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46135 return -EFAULT;
46136
46137@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46138 return -EFAULT;
46139 current->mm->env_end = p;
46140
46141+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46142+
46143 /* Put the elf_info on the stack in the right place. */
46144 sp = (elf_addr_t __user *)envp + 1;
46145- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46146+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46147 return -EFAULT;
46148 return 0;
46149 }
46150@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46151 {
46152 struct elf_phdr *elf_phdata;
46153 struct elf_phdr *eppnt;
46154- unsigned long load_addr = 0;
46155+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46156 int load_addr_set = 0;
46157 unsigned long last_bss = 0, elf_bss = 0;
46158- unsigned long error = ~0UL;
46159+ unsigned long error = -EINVAL;
46160 unsigned long total_size;
46161 int retval, i, size;
46162
46163@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46164 goto out_close;
46165 }
46166
46167+#ifdef CONFIG_PAX_SEGMEXEC
46168+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46169+ pax_task_size = SEGMEXEC_TASK_SIZE;
46170+#endif
46171+
46172 eppnt = elf_phdata;
46173 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46174 if (eppnt->p_type == PT_LOAD) {
46175@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46176 k = load_addr + eppnt->p_vaddr;
46177 if (BAD_ADDR(k) ||
46178 eppnt->p_filesz > eppnt->p_memsz ||
46179- eppnt->p_memsz > TASK_SIZE ||
46180- TASK_SIZE - eppnt->p_memsz < k) {
46181+ eppnt->p_memsz > pax_task_size ||
46182+ pax_task_size - eppnt->p_memsz < k) {
46183 error = -ENOMEM;
46184 goto out_close;
46185 }
46186@@ -532,6 +557,194 @@ out:
46187 return error;
46188 }
46189
46190+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
46191+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
46192+{
46193+ unsigned long pax_flags = 0UL;
46194+
46195+#ifdef CONFIG_PAX_PAGEEXEC
46196+ if (elf_phdata->p_flags & PF_PAGEEXEC)
46197+ pax_flags |= MF_PAX_PAGEEXEC;
46198+#endif
46199+
46200+#ifdef CONFIG_PAX_SEGMEXEC
46201+ if (elf_phdata->p_flags & PF_SEGMEXEC)
46202+ pax_flags |= MF_PAX_SEGMEXEC;
46203+#endif
46204+
46205+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46206+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46207+ if (nx_enabled)
46208+ pax_flags &= ~MF_PAX_SEGMEXEC;
46209+ else
46210+ pax_flags &= ~MF_PAX_PAGEEXEC;
46211+ }
46212+#endif
46213+
46214+#ifdef CONFIG_PAX_EMUTRAMP
46215+ if (elf_phdata->p_flags & PF_EMUTRAMP)
46216+ pax_flags |= MF_PAX_EMUTRAMP;
46217+#endif
46218+
46219+#ifdef CONFIG_PAX_MPROTECT
46220+ if (elf_phdata->p_flags & PF_MPROTECT)
46221+ pax_flags |= MF_PAX_MPROTECT;
46222+#endif
46223+
46224+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46225+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46226+ pax_flags |= MF_PAX_RANDMMAP;
46227+#endif
46228+
46229+ return pax_flags;
46230+}
46231+#endif
46232+
46233+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46234+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
46235+{
46236+ unsigned long pax_flags = 0UL;
46237+
46238+#ifdef CONFIG_PAX_PAGEEXEC
46239+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46240+ pax_flags |= MF_PAX_PAGEEXEC;
46241+#endif
46242+
46243+#ifdef CONFIG_PAX_SEGMEXEC
46244+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46245+ pax_flags |= MF_PAX_SEGMEXEC;
46246+#endif
46247+
46248+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46249+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46250+ if (nx_enabled)
46251+ pax_flags &= ~MF_PAX_SEGMEXEC;
46252+ else
46253+ pax_flags &= ~MF_PAX_PAGEEXEC;
46254+ }
46255+#endif
46256+
46257+#ifdef CONFIG_PAX_EMUTRAMP
46258+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46259+ pax_flags |= MF_PAX_EMUTRAMP;
46260+#endif
46261+
46262+#ifdef CONFIG_PAX_MPROTECT
46263+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46264+ pax_flags |= MF_PAX_MPROTECT;
46265+#endif
46266+
46267+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46268+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46269+ pax_flags |= MF_PAX_RANDMMAP;
46270+#endif
46271+
46272+ return pax_flags;
46273+}
46274+#endif
46275+
46276+#ifdef CONFIG_PAX_EI_PAX
46277+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46278+{
46279+ unsigned long pax_flags = 0UL;
46280+
46281+#ifdef CONFIG_PAX_PAGEEXEC
46282+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46283+ pax_flags |= MF_PAX_PAGEEXEC;
46284+#endif
46285+
46286+#ifdef CONFIG_PAX_SEGMEXEC
46287+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46288+ pax_flags |= MF_PAX_SEGMEXEC;
46289+#endif
46290+
46291+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46292+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46293+ if (nx_enabled)
46294+ pax_flags &= ~MF_PAX_SEGMEXEC;
46295+ else
46296+ pax_flags &= ~MF_PAX_PAGEEXEC;
46297+ }
46298+#endif
46299+
46300+#ifdef CONFIG_PAX_EMUTRAMP
46301+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46302+ pax_flags |= MF_PAX_EMUTRAMP;
46303+#endif
46304+
46305+#ifdef CONFIG_PAX_MPROTECT
46306+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46307+ pax_flags |= MF_PAX_MPROTECT;
46308+#endif
46309+
46310+#ifdef CONFIG_PAX_ASLR
46311+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46312+ pax_flags |= MF_PAX_RANDMMAP;
46313+#endif
46314+
46315+ return pax_flags;
46316+}
46317+#endif
46318+
46319+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
46320+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46321+{
46322+ unsigned long pax_flags = 0UL;
46323+
46324+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46325+ unsigned long i;
46326+ int found_flags = 0;
46327+#endif
46328+
46329+#ifdef CONFIG_PAX_EI_PAX
46330+ pax_flags = pax_parse_ei_pax(elf_ex);
46331+#endif
46332+
46333+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46334+ for (i = 0UL; i < elf_ex->e_phnum; i++)
46335+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46336+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46337+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46338+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46339+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46340+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46341+ return -EINVAL;
46342+
46343+#ifdef CONFIG_PAX_SOFTMODE
46344+ if (pax_softmode)
46345+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
46346+ else
46347+#endif
46348+
46349+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
46350+ found_flags = 1;
46351+ break;
46352+ }
46353+#endif
46354+
46355+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
46356+ if (found_flags == 0) {
46357+ struct elf_phdr phdr;
46358+ memset(&phdr, 0, sizeof(phdr));
46359+ phdr.p_flags = PF_NOEMUTRAMP;
46360+#ifdef CONFIG_PAX_SOFTMODE
46361+ if (pax_softmode)
46362+ pax_flags = pax_parse_softmode(&phdr);
46363+ else
46364+#endif
46365+ pax_flags = pax_parse_hardmode(&phdr);
46366+ }
46367+#endif
46368+
46369+
46370+ if (0 > pax_check_flags(&pax_flags))
46371+ return -EINVAL;
46372+
46373+ current->mm->pax_flags = pax_flags;
46374+ return 0;
46375+}
46376+#endif
46377+
46378 /*
46379 * These are the functions used to load ELF style executables and shared
46380 * libraries. There is no binary dependent code anywhere else.
46381@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46382 {
46383 unsigned int random_variable = 0;
46384
46385+#ifdef CONFIG_PAX_RANDUSTACK
46386+ if (randomize_va_space)
46387+ return stack_top - current->mm->delta_stack;
46388+#endif
46389+
46390 if ((current->flags & PF_RANDOMIZE) &&
46391 !(current->personality & ADDR_NO_RANDOMIZE)) {
46392 random_variable = get_random_int() & STACK_RND_MASK;
46393@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46394 unsigned long load_addr = 0, load_bias = 0;
46395 int load_addr_set = 0;
46396 char * elf_interpreter = NULL;
46397- unsigned long error;
46398+ unsigned long error = 0;
46399 struct elf_phdr *elf_ppnt, *elf_phdata;
46400 unsigned long elf_bss, elf_brk;
46401 int retval, i;
46402@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46403 unsigned long start_code, end_code, start_data, end_data;
46404 unsigned long reloc_func_desc = 0;
46405 int executable_stack = EXSTACK_DEFAULT;
46406- unsigned long def_flags = 0;
46407 struct {
46408 struct elfhdr elf_ex;
46409 struct elfhdr interp_elf_ex;
46410 } *loc;
46411+ unsigned long pax_task_size = TASK_SIZE;
46412
46413 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46414 if (!loc) {
46415@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46416
46417 /* OK, This is the point of no return */
46418 current->flags &= ~PF_FORKNOEXEC;
46419- current->mm->def_flags = def_flags;
46420+
46421+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46422+ current->mm->pax_flags = 0UL;
46423+#endif
46424+
46425+#ifdef CONFIG_PAX_DLRESOLVE
46426+ current->mm->call_dl_resolve = 0UL;
46427+#endif
46428+
46429+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46430+ current->mm->call_syscall = 0UL;
46431+#endif
46432+
46433+#ifdef CONFIG_PAX_ASLR
46434+ current->mm->delta_mmap = 0UL;
46435+ current->mm->delta_stack = 0UL;
46436+#endif
46437+
46438+ current->mm->def_flags = 0;
46439+
46440+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
46441+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
46442+ send_sig(SIGKILL, current, 0);
46443+ goto out_free_dentry;
46444+ }
46445+#endif
46446+
46447+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46448+ pax_set_initial_flags(bprm);
46449+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
46450+ if (pax_set_initial_flags_func)
46451+ (pax_set_initial_flags_func)(bprm);
46452+#endif
46453+
46454+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46455+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
46456+ current->mm->context.user_cs_limit = PAGE_SIZE;
46457+ current->mm->def_flags |= VM_PAGEEXEC;
46458+ }
46459+#endif
46460+
46461+#ifdef CONFIG_PAX_SEGMEXEC
46462+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
46463+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
46464+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
46465+ pax_task_size = SEGMEXEC_TASK_SIZE;
46466+ }
46467+#endif
46468+
46469+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
46470+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46471+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
46472+ put_cpu();
46473+ }
46474+#endif
46475
46476 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
46477 may depend on the personality. */
46478 SET_PERSONALITY(loc->elf_ex);
46479+
46480+#ifdef CONFIG_PAX_ASLR
46481+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
46482+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
46483+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
46484+ }
46485+#endif
46486+
46487+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46488+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46489+ executable_stack = EXSTACK_DISABLE_X;
46490+ current->personality &= ~READ_IMPLIES_EXEC;
46491+ } else
46492+#endif
46493+
46494 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
46495 current->personality |= READ_IMPLIES_EXEC;
46496
46497@@ -800,10 +1087,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46498 * might try to exec. This is because the brk will
46499 * follow the loader, and is not movable. */
46500 #ifdef CONFIG_X86
46501- load_bias = 0;
46502+ if (current->flags & PF_RANDOMIZE)
46503+ load_bias = 0;
46504+ else
46505+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46506 #else
46507 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46508 #endif
46509+
46510+#ifdef CONFIG_PAX_RANDMMAP
46511+ /* PaX: randomize base address at the default exe base if requested */
46512+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
46513+#ifdef CONFIG_SPARC64
46514+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
46515+#else
46516+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
46517+#endif
46518+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
46519+ elf_flags |= MAP_FIXED;
46520+ }
46521+#endif
46522+
46523 }
46524
46525 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
46526@@ -836,9 +1140,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46527 * allowed task size. Note that p_filesz must always be
46528 * <= p_memsz so it is only necessary to check p_memsz.
46529 */
46530- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46531- elf_ppnt->p_memsz > TASK_SIZE ||
46532- TASK_SIZE - elf_ppnt->p_memsz < k) {
46533+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46534+ elf_ppnt->p_memsz > pax_task_size ||
46535+ pax_task_size - elf_ppnt->p_memsz < k) {
46536 /* set_brk can never work. Avoid overflows. */
46537 send_sig(SIGKILL, current, 0);
46538 retval = -EINVAL;
46539@@ -866,6 +1170,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46540 start_data += load_bias;
46541 end_data += load_bias;
46542
46543+#ifdef CONFIG_PAX_RANDMMAP
46544+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
46545+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
46546+#endif
46547+
46548 /* Calling set_brk effectively mmaps the pages that we need
46549 * for the bss and break sections. We must do this before
46550 * mapping in the interpreter, to make sure it doesn't wind
46551@@ -877,9 +1186,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46552 goto out_free_dentry;
46553 }
46554 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
46555- send_sig(SIGSEGV, current, 0);
46556- retval = -EFAULT; /* Nobody gets to see this, but.. */
46557- goto out_free_dentry;
46558+ /*
46559+ * This bss-zeroing can fail if the ELF
46560+ * file specifies odd protections. So
46561+ * we don't check the return value
46562+ */
46563 }
46564
46565 if (elf_interpreter) {
46566@@ -1112,8 +1423,10 @@ static int dump_seek(struct file *file, loff_t off)
46567 unsigned long n = off;
46568 if (n > PAGE_SIZE)
46569 n = PAGE_SIZE;
46570- if (!dump_write(file, buf, n))
46571+ if (!dump_write(file, buf, n)) {
46572+ free_page((unsigned long)buf);
46573 return 0;
46574+ }
46575 off -= n;
46576 }
46577 free_page((unsigned long)buf);
46578@@ -1125,7 +1438,7 @@ static int dump_seek(struct file *file, loff_t off)
46579 * Decide what to dump of a segment, part, all or none.
46580 */
46581 static unsigned long vma_dump_size(struct vm_area_struct *vma,
46582- unsigned long mm_flags)
46583+ unsigned long mm_flags, long signr)
46584 {
46585 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
46586
46587@@ -1159,7 +1472,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
46588 if (vma->vm_file == NULL)
46589 return 0;
46590
46591- if (FILTER(MAPPED_PRIVATE))
46592+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
46593 goto whole;
46594
46595 /*
46596@@ -1255,8 +1568,11 @@ static int writenote(struct memelfnote *men, struct file *file,
46597 #undef DUMP_WRITE
46598
46599 #define DUMP_WRITE(addr, nr) \
46600+ do { \
46601+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
46602 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
46603- goto end_coredump;
46604+ goto end_coredump; \
46605+ } while (0);
46606
46607 static void fill_elf_header(struct elfhdr *elf, int segs,
46608 u16 machine, u32 flags, u8 osabi)
46609@@ -1385,9 +1701,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
46610 {
46611 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
46612 int i = 0;
46613- do
46614+ do {
46615 i += 2;
46616- while (auxv[i - 2] != AT_NULL);
46617+ } while (auxv[i - 2] != AT_NULL);
46618 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
46619 }
46620
46621@@ -1973,7 +2289,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46622 phdr.p_offset = offset;
46623 phdr.p_vaddr = vma->vm_start;
46624 phdr.p_paddr = 0;
46625- phdr.p_filesz = vma_dump_size(vma, mm_flags);
46626+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
46627 phdr.p_memsz = vma->vm_end - vma->vm_start;
46628 offset += phdr.p_filesz;
46629 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
46630@@ -2006,7 +2322,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46631 unsigned long addr;
46632 unsigned long end;
46633
46634- end = vma->vm_start + vma_dump_size(vma, mm_flags);
46635+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
46636
46637 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
46638 struct page *page;
46639@@ -2015,6 +2331,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46640 page = get_dump_page(addr);
46641 if (page) {
46642 void *kaddr = kmap(page);
46643+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
46644 stop = ((size += PAGE_SIZE) > limit) ||
46645 !dump_write(file, kaddr, PAGE_SIZE);
46646 kunmap(page);
46647@@ -2042,6 +2359,97 @@ out:
46648
46649 #endif /* USE_ELF_CORE_DUMP */
46650
46651+#ifdef CONFIG_PAX_MPROTECT
46652+/* PaX: non-PIC ELF libraries need relocations on their executable segments
46653+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
46654+ * we'll remove VM_MAYWRITE for good on RELRO segments.
46655+ *
46656+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
46657+ * basis because we want to allow the common case and not the special ones.
46658+ */
46659+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
46660+{
46661+ struct elfhdr elf_h;
46662+ struct elf_phdr elf_p;
46663+ unsigned long i;
46664+ unsigned long oldflags;
46665+ bool is_textrel_rw, is_textrel_rx, is_relro;
46666+
46667+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
46668+ return;
46669+
46670+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
46671+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
46672+
46673+#ifdef CONFIG_PAX_ELFRELOCS
46674+ /* possible TEXTREL */
46675+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
46676+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
46677+#else
46678+ is_textrel_rw = false;
46679+ is_textrel_rx = false;
46680+#endif
46681+
46682+ /* possible RELRO */
46683+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
46684+
46685+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
46686+ return;
46687+
46688+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
46689+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
46690+
46691+#ifdef CONFIG_PAX_ETEXECRELOCS
46692+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46693+#else
46694+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
46695+#endif
46696+
46697+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46698+ !elf_check_arch(&elf_h) ||
46699+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
46700+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
46701+ return;
46702+
46703+ for (i = 0UL; i < elf_h.e_phnum; i++) {
46704+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
46705+ return;
46706+ switch (elf_p.p_type) {
46707+ case PT_DYNAMIC:
46708+ if (!is_textrel_rw && !is_textrel_rx)
46709+ continue;
46710+ i = 0UL;
46711+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
46712+ elf_dyn dyn;
46713+
46714+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
46715+ return;
46716+ if (dyn.d_tag == DT_NULL)
46717+ return;
46718+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
46719+ gr_log_textrel(vma);
46720+ if (is_textrel_rw)
46721+ vma->vm_flags |= VM_MAYWRITE;
46722+ else
46723+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
46724+ vma->vm_flags &= ~VM_MAYWRITE;
46725+ return;
46726+ }
46727+ i++;
46728+ }
46729+ return;
46730+
46731+ case PT_GNU_RELRO:
46732+ if (!is_relro)
46733+ continue;
46734+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
46735+ vma->vm_flags &= ~VM_MAYWRITE;
46736+ return;
46737+ }
46738+ }
46739+}
46740+#endif
46741+
46742 static int __init init_elf_binfmt(void)
46743 {
46744 return register_binfmt(&elf_format);
46745diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
46746index ca88c46..f155a60 100644
46747--- a/fs/binfmt_flat.c
46748+++ b/fs/binfmt_flat.c
46749@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
46750 realdatastart = (unsigned long) -ENOMEM;
46751 printk("Unable to allocate RAM for process data, errno %d\n",
46752 (int)-realdatastart);
46753+ down_write(&current->mm->mmap_sem);
46754 do_munmap(current->mm, textpos, text_len);
46755+ up_write(&current->mm->mmap_sem);
46756 ret = realdatastart;
46757 goto err;
46758 }
46759@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
46760 }
46761 if (IS_ERR_VALUE(result)) {
46762 printk("Unable to read data+bss, errno %d\n", (int)-result);
46763+ down_write(&current->mm->mmap_sem);
46764 do_munmap(current->mm, textpos, text_len);
46765 do_munmap(current->mm, realdatastart, data_len + extra);
46766+ up_write(&current->mm->mmap_sem);
46767 ret = result;
46768 goto err;
46769 }
46770@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
46771 }
46772 if (IS_ERR_VALUE(result)) {
46773 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
46774+ down_write(&current->mm->mmap_sem);
46775 do_munmap(current->mm, textpos, text_len + data_len + extra +
46776 MAX_SHARED_LIBS * sizeof(unsigned long));
46777+ up_write(&current->mm->mmap_sem);
46778 ret = result;
46779 goto err;
46780 }
46781diff --git a/fs/bio.c b/fs/bio.c
46782index e696713..83de133 100644
46783--- a/fs/bio.c
46784+++ b/fs/bio.c
46785@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
46786
46787 i = 0;
46788 while (i < bio_slab_nr) {
46789- struct bio_slab *bslab = &bio_slabs[i];
46790+ bslab = &bio_slabs[i];
46791
46792 if (!bslab->slab && entry == -1)
46793 entry = i;
46794@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
46795 const int read = bio_data_dir(bio) == READ;
46796 struct bio_map_data *bmd = bio->bi_private;
46797 int i;
46798- char *p = bmd->sgvecs[0].iov_base;
46799+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
46800
46801 __bio_for_each_segment(bvec, bio, i, 0) {
46802 char *addr = page_address(bvec->bv_page);
46803diff --git a/fs/block_dev.c b/fs/block_dev.c
46804index e65efa2..04fae57 100644
46805--- a/fs/block_dev.c
46806+++ b/fs/block_dev.c
46807@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
46808 else if (bdev->bd_contains == bdev)
46809 res = 0; /* is a whole device which isn't held */
46810
46811- else if (bdev->bd_contains->bd_holder == bd_claim)
46812+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
46813 res = 0; /* is a partition of a device that is being partitioned */
46814 else if (bdev->bd_contains->bd_holder != NULL)
46815 res = -EBUSY; /* is a partition of a held device */
46816diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
46817index c4bc570..42acd8d 100644
46818--- a/fs/btrfs/ctree.c
46819+++ b/fs/btrfs/ctree.c
46820@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
46821 free_extent_buffer(buf);
46822 add_root_to_dirty_list(root);
46823 } else {
46824- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
46825- parent_start = parent->start;
46826- else
46827+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
46828+ if (parent)
46829+ parent_start = parent->start;
46830+ else
46831+ parent_start = 0;
46832+ } else
46833 parent_start = 0;
46834
46835 WARN_ON(trans->transid != btrfs_header_generation(parent));
46836@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
46837
46838 ret = 0;
46839 if (slot == 0) {
46840- struct btrfs_disk_key disk_key;
46841 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
46842 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
46843 }
46844diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
46845index f447188..59c17c5 100644
46846--- a/fs/btrfs/disk-io.c
46847+++ b/fs/btrfs/disk-io.c
46848@@ -39,7 +39,7 @@
46849 #include "tree-log.h"
46850 #include "free-space-cache.h"
46851
46852-static struct extent_io_ops btree_extent_io_ops;
46853+static const struct extent_io_ops btree_extent_io_ops;
46854 static void end_workqueue_fn(struct btrfs_work *work);
46855 static void free_fs_root(struct btrfs_root *root);
46856
46857@@ -2607,7 +2607,7 @@ out:
46858 return 0;
46859 }
46860
46861-static struct extent_io_ops btree_extent_io_ops = {
46862+static const struct extent_io_ops btree_extent_io_ops = {
46863 .write_cache_pages_lock_hook = btree_lock_page_hook,
46864 .readpage_end_io_hook = btree_readpage_end_io_hook,
46865 .submit_bio_hook = btree_submit_bio_hook,
46866diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
46867index 559f724..a026171 100644
46868--- a/fs/btrfs/extent-tree.c
46869+++ b/fs/btrfs/extent-tree.c
46870@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
46871 u64 group_start = group->key.objectid;
46872 new_extents = kmalloc(sizeof(*new_extents),
46873 GFP_NOFS);
46874+ if (!new_extents) {
46875+ ret = -ENOMEM;
46876+ goto out;
46877+ }
46878 nr_extents = 1;
46879 ret = get_new_locations(reloc_inode,
46880 extent_key,
46881diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
46882index 36de250..7ec75c7 100644
46883--- a/fs/btrfs/extent_io.h
46884+++ b/fs/btrfs/extent_io.h
46885@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
46886 struct bio *bio, int mirror_num,
46887 unsigned long bio_flags);
46888 struct extent_io_ops {
46889- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
46890+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
46891 u64 start, u64 end, int *page_started,
46892 unsigned long *nr_written);
46893- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
46894- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
46895+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
46896+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
46897 extent_submit_bio_hook_t *submit_bio_hook;
46898- int (*merge_bio_hook)(struct page *page, unsigned long offset,
46899+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
46900 size_t size, struct bio *bio,
46901 unsigned long bio_flags);
46902- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
46903- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
46904+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
46905+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
46906 u64 start, u64 end,
46907 struct extent_state *state);
46908- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
46909+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
46910 u64 start, u64 end,
46911 struct extent_state *state);
46912- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
46913+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
46914 struct extent_state *state);
46915- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
46916+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
46917 struct extent_state *state, int uptodate);
46918- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
46919+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
46920 unsigned long old, unsigned long bits);
46921- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
46922+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
46923 unsigned long bits);
46924- int (*merge_extent_hook)(struct inode *inode,
46925+ int (* const merge_extent_hook)(struct inode *inode,
46926 struct extent_state *new,
46927 struct extent_state *other);
46928- int (*split_extent_hook)(struct inode *inode,
46929+ int (* const split_extent_hook)(struct inode *inode,
46930 struct extent_state *orig, u64 split);
46931- int (*write_cache_pages_lock_hook)(struct page *page);
46932+ int (* const write_cache_pages_lock_hook)(struct page *page);
46933 };
46934
46935 struct extent_io_tree {
46936@@ -88,7 +88,7 @@ struct extent_io_tree {
46937 u64 dirty_bytes;
46938 spinlock_t lock;
46939 spinlock_t buffer_lock;
46940- struct extent_io_ops *ops;
46941+ const struct extent_io_ops *ops;
46942 };
46943
46944 struct extent_state {
46945diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
46946index cb2849f..3718fb4 100644
46947--- a/fs/btrfs/free-space-cache.c
46948+++ b/fs/btrfs/free-space-cache.c
46949@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
46950
46951 while(1) {
46952 if (entry->bytes < bytes || entry->offset < min_start) {
46953- struct rb_node *node;
46954-
46955 node = rb_next(&entry->offset_index);
46956 if (!node)
46957 break;
46958@@ -1226,7 +1224,7 @@ again:
46959 */
46960 while (entry->bitmap || found_bitmap ||
46961 (!entry->bitmap && entry->bytes < min_bytes)) {
46962- struct rb_node *node = rb_next(&entry->offset_index);
46963+ node = rb_next(&entry->offset_index);
46964
46965 if (entry->bitmap && entry->bytes > bytes + empty_size) {
46966 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
46967diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
46968index e03a836..323837e 100644
46969--- a/fs/btrfs/inode.c
46970+++ b/fs/btrfs/inode.c
46971@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
46972 static const struct address_space_operations btrfs_aops;
46973 static const struct address_space_operations btrfs_symlink_aops;
46974 static const struct file_operations btrfs_dir_file_operations;
46975-static struct extent_io_ops btrfs_extent_io_ops;
46976+static const struct extent_io_ops btrfs_extent_io_ops;
46977
46978 static struct kmem_cache *btrfs_inode_cachep;
46979 struct kmem_cache *btrfs_trans_handle_cachep;
46980@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
46981 1, 0, NULL, GFP_NOFS);
46982 while (start < end) {
46983 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
46984+ BUG_ON(!async_cow);
46985 async_cow->inode = inode;
46986 async_cow->root = root;
46987 async_cow->locked_page = locked_page;
46988@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
46989 inline_size = btrfs_file_extent_inline_item_len(leaf,
46990 btrfs_item_nr(leaf, path->slots[0]));
46991 tmp = kmalloc(inline_size, GFP_NOFS);
46992+ if (!tmp)
46993+ return -ENOMEM;
46994 ptr = btrfs_file_extent_inline_start(item);
46995
46996 read_extent_buffer(leaf, tmp, ptr, inline_size);
46997@@ -5410,7 +5413,7 @@ fail:
46998 return -ENOMEM;
46999 }
47000
47001-static int btrfs_getattr(struct vfsmount *mnt,
47002+int btrfs_getattr(struct vfsmount *mnt,
47003 struct dentry *dentry, struct kstat *stat)
47004 {
47005 struct inode *inode = dentry->d_inode;
47006@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47007 return 0;
47008 }
47009
47010+EXPORT_SYMBOL(btrfs_getattr);
47011+
47012+dev_t get_btrfs_dev_from_inode(struct inode *inode)
47013+{
47014+ return BTRFS_I(inode)->root->anon_super.s_dev;
47015+}
47016+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47017+
47018 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47019 struct inode *new_dir, struct dentry *new_dentry)
47020 {
47021@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47022 .fsync = btrfs_sync_file,
47023 };
47024
47025-static struct extent_io_ops btrfs_extent_io_ops = {
47026+static const struct extent_io_ops btrfs_extent_io_ops = {
47027 .fill_delalloc = run_delalloc_range,
47028 .submit_bio_hook = btrfs_submit_bio_hook,
47029 .merge_bio_hook = btrfs_merge_bio_hook,
47030diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47031index ab7ab53..94e0781 100644
47032--- a/fs/btrfs/relocation.c
47033+++ b/fs/btrfs/relocation.c
47034@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47035 }
47036 spin_unlock(&rc->reloc_root_tree.lock);
47037
47038- BUG_ON((struct btrfs_root *)node->data != root);
47039+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
47040
47041 if (!del) {
47042 spin_lock(&rc->reloc_root_tree.lock);
47043diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47044index a240b6f..4ce16ef 100644
47045--- a/fs/btrfs/sysfs.c
47046+++ b/fs/btrfs/sysfs.c
47047@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47048 complete(&root->kobj_unregister);
47049 }
47050
47051-static struct sysfs_ops btrfs_super_attr_ops = {
47052+static const struct sysfs_ops btrfs_super_attr_ops = {
47053 .show = btrfs_super_attr_show,
47054 .store = btrfs_super_attr_store,
47055 };
47056
47057-static struct sysfs_ops btrfs_root_attr_ops = {
47058+static const struct sysfs_ops btrfs_root_attr_ops = {
47059 .show = btrfs_root_attr_show,
47060 .store = btrfs_root_attr_store,
47061 };
47062diff --git a/fs/buffer.c b/fs/buffer.c
47063index 6fa5302..395d9f6 100644
47064--- a/fs/buffer.c
47065+++ b/fs/buffer.c
47066@@ -25,6 +25,7 @@
47067 #include <linux/percpu.h>
47068 #include <linux/slab.h>
47069 #include <linux/capability.h>
47070+#include <linux/security.h>
47071 #include <linux/blkdev.h>
47072 #include <linux/file.h>
47073 #include <linux/quotaops.h>
47074diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47075index 3797e00..ce776f6 100644
47076--- a/fs/cachefiles/bind.c
47077+++ b/fs/cachefiles/bind.c
47078@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47079 args);
47080
47081 /* start by checking things over */
47082- ASSERT(cache->fstop_percent >= 0 &&
47083- cache->fstop_percent < cache->fcull_percent &&
47084+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
47085 cache->fcull_percent < cache->frun_percent &&
47086 cache->frun_percent < 100);
47087
47088- ASSERT(cache->bstop_percent >= 0 &&
47089- cache->bstop_percent < cache->bcull_percent &&
47090+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
47091 cache->bcull_percent < cache->brun_percent &&
47092 cache->brun_percent < 100);
47093
47094diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47095index 4618516..bb30d01 100644
47096--- a/fs/cachefiles/daemon.c
47097+++ b/fs/cachefiles/daemon.c
47098@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47099 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47100 return -EIO;
47101
47102- if (datalen < 0 || datalen > PAGE_SIZE - 1)
47103+ if (datalen > PAGE_SIZE - 1)
47104 return -EOPNOTSUPP;
47105
47106 /* drag the command string into the kernel so we can parse it */
47107@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47108 if (args[0] != '%' || args[1] != '\0')
47109 return -EINVAL;
47110
47111- if (fstop < 0 || fstop >= cache->fcull_percent)
47112+ if (fstop >= cache->fcull_percent)
47113 return cachefiles_daemon_range_error(cache, args);
47114
47115 cache->fstop_percent = fstop;
47116@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47117 if (args[0] != '%' || args[1] != '\0')
47118 return -EINVAL;
47119
47120- if (bstop < 0 || bstop >= cache->bcull_percent)
47121+ if (bstop >= cache->bcull_percent)
47122 return cachefiles_daemon_range_error(cache, args);
47123
47124 cache->bstop_percent = bstop;
47125diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47126index f7c255f..fcd61de 100644
47127--- a/fs/cachefiles/internal.h
47128+++ b/fs/cachefiles/internal.h
47129@@ -56,7 +56,7 @@ struct cachefiles_cache {
47130 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47131 struct rb_root active_nodes; /* active nodes (can't be culled) */
47132 rwlock_t active_lock; /* lock for active_nodes */
47133- atomic_t gravecounter; /* graveyard uniquifier */
47134+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47135 unsigned frun_percent; /* when to stop culling (% files) */
47136 unsigned fcull_percent; /* when to start culling (% files) */
47137 unsigned fstop_percent; /* when to stop allocating (% files) */
47138@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47139 * proc.c
47140 */
47141 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47142-extern atomic_t cachefiles_lookup_histogram[HZ];
47143-extern atomic_t cachefiles_mkdir_histogram[HZ];
47144-extern atomic_t cachefiles_create_histogram[HZ];
47145+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47146+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47147+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47148
47149 extern int __init cachefiles_proc_init(void);
47150 extern void cachefiles_proc_cleanup(void);
47151 static inline
47152-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47153+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47154 {
47155 unsigned long jif = jiffies - start_jif;
47156 if (jif >= HZ)
47157 jif = HZ - 1;
47158- atomic_inc(&histogram[jif]);
47159+ atomic_inc_unchecked(&histogram[jif]);
47160 }
47161
47162 #else
47163diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47164index 14ac480..a62766c 100644
47165--- a/fs/cachefiles/namei.c
47166+++ b/fs/cachefiles/namei.c
47167@@ -250,7 +250,7 @@ try_again:
47168 /* first step is to make up a grave dentry in the graveyard */
47169 sprintf(nbuffer, "%08x%08x",
47170 (uint32_t) get_seconds(),
47171- (uint32_t) atomic_inc_return(&cache->gravecounter));
47172+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47173
47174 /* do the multiway lock magic */
47175 trap = lock_rename(cache->graveyard, dir);
47176diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47177index eccd339..4c1d995 100644
47178--- a/fs/cachefiles/proc.c
47179+++ b/fs/cachefiles/proc.c
47180@@ -14,9 +14,9 @@
47181 #include <linux/seq_file.h>
47182 #include "internal.h"
47183
47184-atomic_t cachefiles_lookup_histogram[HZ];
47185-atomic_t cachefiles_mkdir_histogram[HZ];
47186-atomic_t cachefiles_create_histogram[HZ];
47187+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47188+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47189+atomic_unchecked_t cachefiles_create_histogram[HZ];
47190
47191 /*
47192 * display the latency histogram
47193@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47194 return 0;
47195 default:
47196 index = (unsigned long) v - 3;
47197- x = atomic_read(&cachefiles_lookup_histogram[index]);
47198- y = atomic_read(&cachefiles_mkdir_histogram[index]);
47199- z = atomic_read(&cachefiles_create_histogram[index]);
47200+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47201+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47202+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47203 if (x == 0 && y == 0 && z == 0)
47204 return 0;
47205
47206diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47207index a6c8c6f..5cf8517 100644
47208--- a/fs/cachefiles/rdwr.c
47209+++ b/fs/cachefiles/rdwr.c
47210@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47211 old_fs = get_fs();
47212 set_fs(KERNEL_DS);
47213 ret = file->f_op->write(
47214- file, (const void __user *) data, len, &pos);
47215+ file, (const void __force_user *) data, len, &pos);
47216 set_fs(old_fs);
47217 kunmap(page);
47218 if (ret != len)
47219diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47220index 42cec2a..2aba466 100644
47221--- a/fs/cifs/cifs_debug.c
47222+++ b/fs/cifs/cifs_debug.c
47223@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47224 tcon = list_entry(tmp3,
47225 struct cifsTconInfo,
47226 tcon_list);
47227- atomic_set(&tcon->num_smbs_sent, 0);
47228- atomic_set(&tcon->num_writes, 0);
47229- atomic_set(&tcon->num_reads, 0);
47230- atomic_set(&tcon->num_oplock_brks, 0);
47231- atomic_set(&tcon->num_opens, 0);
47232- atomic_set(&tcon->num_posixopens, 0);
47233- atomic_set(&tcon->num_posixmkdirs, 0);
47234- atomic_set(&tcon->num_closes, 0);
47235- atomic_set(&tcon->num_deletes, 0);
47236- atomic_set(&tcon->num_mkdirs, 0);
47237- atomic_set(&tcon->num_rmdirs, 0);
47238- atomic_set(&tcon->num_renames, 0);
47239- atomic_set(&tcon->num_t2renames, 0);
47240- atomic_set(&tcon->num_ffirst, 0);
47241- atomic_set(&tcon->num_fnext, 0);
47242- atomic_set(&tcon->num_fclose, 0);
47243- atomic_set(&tcon->num_hardlinks, 0);
47244- atomic_set(&tcon->num_symlinks, 0);
47245- atomic_set(&tcon->num_locks, 0);
47246+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47247+ atomic_set_unchecked(&tcon->num_writes, 0);
47248+ atomic_set_unchecked(&tcon->num_reads, 0);
47249+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47250+ atomic_set_unchecked(&tcon->num_opens, 0);
47251+ atomic_set_unchecked(&tcon->num_posixopens, 0);
47252+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47253+ atomic_set_unchecked(&tcon->num_closes, 0);
47254+ atomic_set_unchecked(&tcon->num_deletes, 0);
47255+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
47256+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
47257+ atomic_set_unchecked(&tcon->num_renames, 0);
47258+ atomic_set_unchecked(&tcon->num_t2renames, 0);
47259+ atomic_set_unchecked(&tcon->num_ffirst, 0);
47260+ atomic_set_unchecked(&tcon->num_fnext, 0);
47261+ atomic_set_unchecked(&tcon->num_fclose, 0);
47262+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
47263+ atomic_set_unchecked(&tcon->num_symlinks, 0);
47264+ atomic_set_unchecked(&tcon->num_locks, 0);
47265 }
47266 }
47267 }
47268@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47269 if (tcon->need_reconnect)
47270 seq_puts(m, "\tDISCONNECTED ");
47271 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47272- atomic_read(&tcon->num_smbs_sent),
47273- atomic_read(&tcon->num_oplock_brks));
47274+ atomic_read_unchecked(&tcon->num_smbs_sent),
47275+ atomic_read_unchecked(&tcon->num_oplock_brks));
47276 seq_printf(m, "\nReads: %d Bytes: %lld",
47277- atomic_read(&tcon->num_reads),
47278+ atomic_read_unchecked(&tcon->num_reads),
47279 (long long)(tcon->bytes_read));
47280 seq_printf(m, "\nWrites: %d Bytes: %lld",
47281- atomic_read(&tcon->num_writes),
47282+ atomic_read_unchecked(&tcon->num_writes),
47283 (long long)(tcon->bytes_written));
47284 seq_printf(m, "\nFlushes: %d",
47285- atomic_read(&tcon->num_flushes));
47286+ atomic_read_unchecked(&tcon->num_flushes));
47287 seq_printf(m, "\nLocks: %d HardLinks: %d "
47288 "Symlinks: %d",
47289- atomic_read(&tcon->num_locks),
47290- atomic_read(&tcon->num_hardlinks),
47291- atomic_read(&tcon->num_symlinks));
47292+ atomic_read_unchecked(&tcon->num_locks),
47293+ atomic_read_unchecked(&tcon->num_hardlinks),
47294+ atomic_read_unchecked(&tcon->num_symlinks));
47295 seq_printf(m, "\nOpens: %d Closes: %d "
47296 "Deletes: %d",
47297- atomic_read(&tcon->num_opens),
47298- atomic_read(&tcon->num_closes),
47299- atomic_read(&tcon->num_deletes));
47300+ atomic_read_unchecked(&tcon->num_opens),
47301+ atomic_read_unchecked(&tcon->num_closes),
47302+ atomic_read_unchecked(&tcon->num_deletes));
47303 seq_printf(m, "\nPosix Opens: %d "
47304 "Posix Mkdirs: %d",
47305- atomic_read(&tcon->num_posixopens),
47306- atomic_read(&tcon->num_posixmkdirs));
47307+ atomic_read_unchecked(&tcon->num_posixopens),
47308+ atomic_read_unchecked(&tcon->num_posixmkdirs));
47309 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47310- atomic_read(&tcon->num_mkdirs),
47311- atomic_read(&tcon->num_rmdirs));
47312+ atomic_read_unchecked(&tcon->num_mkdirs),
47313+ atomic_read_unchecked(&tcon->num_rmdirs));
47314 seq_printf(m, "\nRenames: %d T2 Renames %d",
47315- atomic_read(&tcon->num_renames),
47316- atomic_read(&tcon->num_t2renames));
47317+ atomic_read_unchecked(&tcon->num_renames),
47318+ atomic_read_unchecked(&tcon->num_t2renames));
47319 seq_printf(m, "\nFindFirst: %d FNext %d "
47320 "FClose %d",
47321- atomic_read(&tcon->num_ffirst),
47322- atomic_read(&tcon->num_fnext),
47323- atomic_read(&tcon->num_fclose));
47324+ atomic_read_unchecked(&tcon->num_ffirst),
47325+ atomic_read_unchecked(&tcon->num_fnext),
47326+ atomic_read_unchecked(&tcon->num_fclose));
47327 }
47328 }
47329 }
47330diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47331index 1445407..68cb0dc 100644
47332--- a/fs/cifs/cifsfs.c
47333+++ b/fs/cifs/cifsfs.c
47334@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47335 cifs_req_cachep = kmem_cache_create("cifs_request",
47336 CIFSMaxBufSize +
47337 MAX_CIFS_HDR_SIZE, 0,
47338- SLAB_HWCACHE_ALIGN, NULL);
47339+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47340 if (cifs_req_cachep == NULL)
47341 return -ENOMEM;
47342
47343@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47344 efficient to alloc 1 per page off the slab compared to 17K (5page)
47345 alloc of large cifs buffers even when page debugging is on */
47346 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47347- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47348+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47349 NULL);
47350 if (cifs_sm_req_cachep == NULL) {
47351 mempool_destroy(cifs_req_poolp);
47352@@ -991,8 +991,8 @@ init_cifs(void)
47353 atomic_set(&bufAllocCount, 0);
47354 atomic_set(&smBufAllocCount, 0);
47355 #ifdef CONFIG_CIFS_STATS2
47356- atomic_set(&totBufAllocCount, 0);
47357- atomic_set(&totSmBufAllocCount, 0);
47358+ atomic_set_unchecked(&totBufAllocCount, 0);
47359+ atomic_set_unchecked(&totSmBufAllocCount, 0);
47360 #endif /* CONFIG_CIFS_STATS2 */
47361
47362 atomic_set(&midCount, 0);
47363diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47364index e29581e..1c22bab 100644
47365--- a/fs/cifs/cifsglob.h
47366+++ b/fs/cifs/cifsglob.h
47367@@ -252,28 +252,28 @@ struct cifsTconInfo {
47368 __u16 Flags; /* optional support bits */
47369 enum statusEnum tidStatus;
47370 #ifdef CONFIG_CIFS_STATS
47371- atomic_t num_smbs_sent;
47372- atomic_t num_writes;
47373- atomic_t num_reads;
47374- atomic_t num_flushes;
47375- atomic_t num_oplock_brks;
47376- atomic_t num_opens;
47377- atomic_t num_closes;
47378- atomic_t num_deletes;
47379- atomic_t num_mkdirs;
47380- atomic_t num_posixopens;
47381- atomic_t num_posixmkdirs;
47382- atomic_t num_rmdirs;
47383- atomic_t num_renames;
47384- atomic_t num_t2renames;
47385- atomic_t num_ffirst;
47386- atomic_t num_fnext;
47387- atomic_t num_fclose;
47388- atomic_t num_hardlinks;
47389- atomic_t num_symlinks;
47390- atomic_t num_locks;
47391- atomic_t num_acl_get;
47392- atomic_t num_acl_set;
47393+ atomic_unchecked_t num_smbs_sent;
47394+ atomic_unchecked_t num_writes;
47395+ atomic_unchecked_t num_reads;
47396+ atomic_unchecked_t num_flushes;
47397+ atomic_unchecked_t num_oplock_brks;
47398+ atomic_unchecked_t num_opens;
47399+ atomic_unchecked_t num_closes;
47400+ atomic_unchecked_t num_deletes;
47401+ atomic_unchecked_t num_mkdirs;
47402+ atomic_unchecked_t num_posixopens;
47403+ atomic_unchecked_t num_posixmkdirs;
47404+ atomic_unchecked_t num_rmdirs;
47405+ atomic_unchecked_t num_renames;
47406+ atomic_unchecked_t num_t2renames;
47407+ atomic_unchecked_t num_ffirst;
47408+ atomic_unchecked_t num_fnext;
47409+ atomic_unchecked_t num_fclose;
47410+ atomic_unchecked_t num_hardlinks;
47411+ atomic_unchecked_t num_symlinks;
47412+ atomic_unchecked_t num_locks;
47413+ atomic_unchecked_t num_acl_get;
47414+ atomic_unchecked_t num_acl_set;
47415 #ifdef CONFIG_CIFS_STATS2
47416 unsigned long long time_writes;
47417 unsigned long long time_reads;
47418@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47419 }
47420
47421 #ifdef CONFIG_CIFS_STATS
47422-#define cifs_stats_inc atomic_inc
47423+#define cifs_stats_inc atomic_inc_unchecked
47424
47425 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47426 unsigned int bytes)
47427@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47428 /* Various Debug counters */
47429 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47430 #ifdef CONFIG_CIFS_STATS2
47431-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47432-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47433+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47434+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47435 #endif
47436 GLOBAL_EXTERN atomic_t smBufAllocCount;
47437 GLOBAL_EXTERN atomic_t midCount;
47438diff --git a/fs/cifs/link.c b/fs/cifs/link.c
47439index fc1e048..28b3441 100644
47440--- a/fs/cifs/link.c
47441+++ b/fs/cifs/link.c
47442@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
47443
47444 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
47445 {
47446- char *p = nd_get_link(nd);
47447+ const char *p = nd_get_link(nd);
47448 if (!IS_ERR(p))
47449 kfree(p);
47450 }
47451diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
47452index d27d4ec..8d0a444 100644
47453--- a/fs/cifs/misc.c
47454+++ b/fs/cifs/misc.c
47455@@ -155,7 +155,7 @@ cifs_buf_get(void)
47456 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
47457 atomic_inc(&bufAllocCount);
47458 #ifdef CONFIG_CIFS_STATS2
47459- atomic_inc(&totBufAllocCount);
47460+ atomic_inc_unchecked(&totBufAllocCount);
47461 #endif /* CONFIG_CIFS_STATS2 */
47462 }
47463
47464@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
47465 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
47466 atomic_inc(&smBufAllocCount);
47467 #ifdef CONFIG_CIFS_STATS2
47468- atomic_inc(&totSmBufAllocCount);
47469+ atomic_inc_unchecked(&totSmBufAllocCount);
47470 #endif /* CONFIG_CIFS_STATS2 */
47471
47472 }
47473diff --git a/fs/coda/cache.c b/fs/coda/cache.c
47474index a5bf577..6d19845 100644
47475--- a/fs/coda/cache.c
47476+++ b/fs/coda/cache.c
47477@@ -24,14 +24,14 @@
47478 #include <linux/coda_fs_i.h>
47479 #include <linux/coda_cache.h>
47480
47481-static atomic_t permission_epoch = ATOMIC_INIT(0);
47482+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
47483
47484 /* replace or extend an acl cache hit */
47485 void coda_cache_enter(struct inode *inode, int mask)
47486 {
47487 struct coda_inode_info *cii = ITOC(inode);
47488
47489- cii->c_cached_epoch = atomic_read(&permission_epoch);
47490+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
47491 if (cii->c_uid != current_fsuid()) {
47492 cii->c_uid = current_fsuid();
47493 cii->c_cached_perm = mask;
47494@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
47495 void coda_cache_clear_inode(struct inode *inode)
47496 {
47497 struct coda_inode_info *cii = ITOC(inode);
47498- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
47499+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
47500 }
47501
47502 /* remove all acl caches */
47503 void coda_cache_clear_all(struct super_block *sb)
47504 {
47505- atomic_inc(&permission_epoch);
47506+ atomic_inc_unchecked(&permission_epoch);
47507 }
47508
47509
47510@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
47511
47512 hit = (mask & cii->c_cached_perm) == mask &&
47513 cii->c_uid == current_fsuid() &&
47514- cii->c_cached_epoch == atomic_read(&permission_epoch);
47515+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
47516
47517 return hit;
47518 }
47519diff --git a/fs/compat.c b/fs/compat.c
47520index d1e2411..27064e4 100644
47521--- a/fs/compat.c
47522+++ b/fs/compat.c
47523@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
47524 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
47525 {
47526 compat_ino_t ino = stat->ino;
47527- typeof(ubuf->st_uid) uid = 0;
47528- typeof(ubuf->st_gid) gid = 0;
47529+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
47530+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
47531 int err;
47532
47533 SET_UID(uid, stat->uid);
47534@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
47535
47536 set_fs(KERNEL_DS);
47537 /* The __user pointer cast is valid because of the set_fs() */
47538- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
47539+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
47540 set_fs(oldfs);
47541 /* truncating is ok because it's a user address */
47542 if (!ret)
47543@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
47544
47545 struct compat_readdir_callback {
47546 struct compat_old_linux_dirent __user *dirent;
47547+ struct file * file;
47548 int result;
47549 };
47550
47551@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
47552 buf->result = -EOVERFLOW;
47553 return -EOVERFLOW;
47554 }
47555+
47556+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47557+ return 0;
47558+
47559 buf->result++;
47560 dirent = buf->dirent;
47561 if (!access_ok(VERIFY_WRITE, dirent,
47562@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
47563
47564 buf.result = 0;
47565 buf.dirent = dirent;
47566+ buf.file = file;
47567
47568 error = vfs_readdir(file, compat_fillonedir, &buf);
47569 if (buf.result)
47570@@ -899,6 +905,7 @@ struct compat_linux_dirent {
47571 struct compat_getdents_callback {
47572 struct compat_linux_dirent __user *current_dir;
47573 struct compat_linux_dirent __user *previous;
47574+ struct file * file;
47575 int count;
47576 int error;
47577 };
47578@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
47579 buf->error = -EOVERFLOW;
47580 return -EOVERFLOW;
47581 }
47582+
47583+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47584+ return 0;
47585+
47586 dirent = buf->previous;
47587 if (dirent) {
47588 if (__put_user(offset, &dirent->d_off))
47589@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
47590 buf.previous = NULL;
47591 buf.count = count;
47592 buf.error = 0;
47593+ buf.file = file;
47594
47595 error = vfs_readdir(file, compat_filldir, &buf);
47596 if (error >= 0)
47597@@ -987,6 +999,7 @@ out:
47598 struct compat_getdents_callback64 {
47599 struct linux_dirent64 __user *current_dir;
47600 struct linux_dirent64 __user *previous;
47601+ struct file * file;
47602 int count;
47603 int error;
47604 };
47605@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
47606 buf->error = -EINVAL; /* only used if we fail.. */
47607 if (reclen > buf->count)
47608 return -EINVAL;
47609+
47610+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47611+ return 0;
47612+
47613 dirent = buf->previous;
47614
47615 if (dirent) {
47616@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
47617 buf.previous = NULL;
47618 buf.count = count;
47619 buf.error = 0;
47620+ buf.file = file;
47621
47622 error = vfs_readdir(file, compat_filldir64, &buf);
47623 if (error >= 0)
47624 error = buf.error;
47625 lastdirent = buf.previous;
47626 if (lastdirent) {
47627- typeof(lastdirent->d_off) d_off = file->f_pos;
47628+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
47629 if (__put_user_unaligned(d_off, &lastdirent->d_off))
47630 error = -EFAULT;
47631 else
47632@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
47633 * verify all the pointers
47634 */
47635 ret = -EINVAL;
47636- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
47637+ if (nr_segs > UIO_MAXIOV)
47638 goto out;
47639 if (!file->f_op)
47640 goto out;
47641@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
47642 compat_uptr_t __user *envp,
47643 struct pt_regs * regs)
47644 {
47645+#ifdef CONFIG_GRKERNSEC
47646+ struct file *old_exec_file;
47647+ struct acl_subject_label *old_acl;
47648+ struct rlimit old_rlim[RLIM_NLIMITS];
47649+#endif
47650 struct linux_binprm *bprm;
47651 struct file *file;
47652 struct files_struct *displaced;
47653 bool clear_in_exec;
47654 int retval;
47655+ const struct cred *cred = current_cred();
47656+
47657+ /*
47658+ * We move the actual failure in case of RLIMIT_NPROC excess from
47659+ * set*uid() to execve() because too many poorly written programs
47660+ * don't check setuid() return code. Here we additionally recheck
47661+ * whether NPROC limit is still exceeded.
47662+ */
47663+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
47664+
47665+ if ((current->flags & PF_NPROC_EXCEEDED) &&
47666+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
47667+ retval = -EAGAIN;
47668+ goto out_ret;
47669+ }
47670+
47671+ /* We're below the limit (still or again), so we don't want to make
47672+ * further execve() calls fail. */
47673+ current->flags &= ~PF_NPROC_EXCEEDED;
47674
47675 retval = unshare_files(&displaced);
47676 if (retval)
47677@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
47678 bprm->filename = filename;
47679 bprm->interp = filename;
47680
47681+ if (gr_process_user_ban()) {
47682+ retval = -EPERM;
47683+ goto out_file;
47684+ }
47685+
47686+ retval = -EACCES;
47687+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
47688+ goto out_file;
47689+
47690 retval = bprm_mm_init(bprm);
47691 if (retval)
47692 goto out_file;
47693@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
47694 if (retval < 0)
47695 goto out;
47696
47697+ if (!gr_tpe_allow(file)) {
47698+ retval = -EACCES;
47699+ goto out;
47700+ }
47701+
47702+ if (gr_check_crash_exec(file)) {
47703+ retval = -EACCES;
47704+ goto out;
47705+ }
47706+
47707+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
47708+
47709+ gr_handle_exec_args_compat(bprm, argv);
47710+
47711+#ifdef CONFIG_GRKERNSEC
47712+ old_acl = current->acl;
47713+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
47714+ old_exec_file = current->exec_file;
47715+ get_file(file);
47716+ current->exec_file = file;
47717+#endif
47718+
47719+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
47720+ bprm->unsafe & LSM_UNSAFE_SHARE);
47721+ if (retval < 0)
47722+ goto out_fail;
47723+
47724 retval = search_binary_handler(bprm, regs);
47725 if (retval < 0)
47726- goto out;
47727+ goto out_fail;
47728+#ifdef CONFIG_GRKERNSEC
47729+ if (old_exec_file)
47730+ fput(old_exec_file);
47731+#endif
47732
47733 /* execve succeeded */
47734 current->fs->in_exec = 0;
47735@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
47736 put_files_struct(displaced);
47737 return retval;
47738
47739+out_fail:
47740+#ifdef CONFIG_GRKERNSEC
47741+ current->acl = old_acl;
47742+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
47743+ fput(current->exec_file);
47744+ current->exec_file = old_exec_file;
47745+#endif
47746+
47747 out:
47748 if (bprm->mm) {
47749 acct_arg_size(bprm, 0);
47750@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
47751 struct fdtable *fdt;
47752 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
47753
47754+ pax_track_stack();
47755+
47756 if (n < 0)
47757 goto out_nofds;
47758
47759@@ -2151,7 +2243,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
47760 oldfs = get_fs();
47761 set_fs(KERNEL_DS);
47762 /* The __user pointer casts are valid because of the set_fs() */
47763- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
47764+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
47765 set_fs(oldfs);
47766
47767 if (err)
47768diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
47769index 0adced2..bbb1b0d 100644
47770--- a/fs/compat_binfmt_elf.c
47771+++ b/fs/compat_binfmt_elf.c
47772@@ -29,10 +29,12 @@
47773 #undef elfhdr
47774 #undef elf_phdr
47775 #undef elf_note
47776+#undef elf_dyn
47777 #undef elf_addr_t
47778 #define elfhdr elf32_hdr
47779 #define elf_phdr elf32_phdr
47780 #define elf_note elf32_note
47781+#define elf_dyn Elf32_Dyn
47782 #define elf_addr_t Elf32_Addr
47783
47784 /*
47785diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
47786index d84e705..d8c364c 100644
47787--- a/fs/compat_ioctl.c
47788+++ b/fs/compat_ioctl.c
47789@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
47790 up = (struct compat_video_spu_palette __user *) arg;
47791 err = get_user(palp, &up->palette);
47792 err |= get_user(length, &up->length);
47793+ if (err)
47794+ return -EFAULT;
47795
47796 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
47797 err = put_user(compat_ptr(palp), &up_native->palette);
47798@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
47799 return -EFAULT;
47800 if (__get_user(udata, &ss32->iomem_base))
47801 return -EFAULT;
47802- ss.iomem_base = compat_ptr(udata);
47803+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
47804 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
47805 __get_user(ss.port_high, &ss32->port_high))
47806 return -EFAULT;
47807@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
47808 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
47809 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
47810 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
47811- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
47812+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
47813 return -EFAULT;
47814
47815 return ioctl_preallocate(file, p);
47816diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
47817index 8e48b52..f01ed91 100644
47818--- a/fs/configfs/dir.c
47819+++ b/fs/configfs/dir.c
47820@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
47821 }
47822 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
47823 struct configfs_dirent *next;
47824- const char * name;
47825+ const unsigned char * name;
47826+ char d_name[sizeof(next->s_dentry->d_iname)];
47827 int len;
47828
47829 next = list_entry(p, struct configfs_dirent,
47830@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
47831 continue;
47832
47833 name = configfs_get_name(next);
47834- len = strlen(name);
47835+ if (next->s_dentry && name == next->s_dentry->d_iname) {
47836+ len = next->s_dentry->d_name.len;
47837+ memcpy(d_name, name, len);
47838+ name = d_name;
47839+ } else
47840+ len = strlen(name);
47841 if (next->s_dentry)
47842 ino = next->s_dentry->d_inode->i_ino;
47843 else
47844diff --git a/fs/dcache.c b/fs/dcache.c
47845index 44c0aea..2529092 100644
47846--- a/fs/dcache.c
47847+++ b/fs/dcache.c
47848@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
47849
47850 static struct kmem_cache *dentry_cache __read_mostly;
47851
47852-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
47853-
47854 /*
47855 * This is the single most critical data structure when it comes
47856 * to the dcache: the hashtable for lookups. Somebody should try
47857@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
47858 mempages -= reserve;
47859
47860 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
47861- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
47862+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
47863
47864 dcache_init();
47865 inode_init();
47866diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
47867index c010ecf..a8d8c59 100644
47868--- a/fs/dlm/lockspace.c
47869+++ b/fs/dlm/lockspace.c
47870@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
47871 kfree(ls);
47872 }
47873
47874-static struct sysfs_ops dlm_attr_ops = {
47875+static const struct sysfs_ops dlm_attr_ops = {
47876 .show = dlm_attr_show,
47877 .store = dlm_attr_store,
47878 };
47879diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
47880index 88ba4d4..073f003 100644
47881--- a/fs/ecryptfs/inode.c
47882+++ b/fs/ecryptfs/inode.c
47883@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
47884 old_fs = get_fs();
47885 set_fs(get_ds());
47886 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
47887- (char __user *)lower_buf,
47888+ (char __force_user *)lower_buf,
47889 lower_bufsiz);
47890 set_fs(old_fs);
47891 if (rc < 0)
47892@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47893 }
47894 old_fs = get_fs();
47895 set_fs(get_ds());
47896- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
47897+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
47898 set_fs(old_fs);
47899 if (rc < 0)
47900 goto out_free;
47901diff --git a/fs/exec.c b/fs/exec.c
47902index 86fafc6..b307bfa 100644
47903--- a/fs/exec.c
47904+++ b/fs/exec.c
47905@@ -56,12 +56,24 @@
47906 #include <linux/fsnotify.h>
47907 #include <linux/fs_struct.h>
47908 #include <linux/pipe_fs_i.h>
47909+#include <linux/random.h>
47910+#include <linux/seq_file.h>
47911+
47912+#ifdef CONFIG_PAX_REFCOUNT
47913+#include <linux/kallsyms.h>
47914+#include <linux/kdebug.h>
47915+#endif
47916
47917 #include <asm/uaccess.h>
47918 #include <asm/mmu_context.h>
47919 #include <asm/tlb.h>
47920 #include "internal.h"
47921
47922+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
47923+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
47924+EXPORT_SYMBOL(pax_set_initial_flags_func);
47925+#endif
47926+
47927 int core_uses_pid;
47928 char core_pattern[CORENAME_MAX_SIZE] = "core";
47929 unsigned int core_pipe_limit;
47930@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
47931 int write)
47932 {
47933 struct page *page;
47934- int ret;
47935
47936-#ifdef CONFIG_STACK_GROWSUP
47937- if (write) {
47938- ret = expand_stack_downwards(bprm->vma, pos);
47939- if (ret < 0)
47940- return NULL;
47941- }
47942-#endif
47943- ret = get_user_pages(current, bprm->mm, pos,
47944- 1, write, 1, &page, NULL);
47945- if (ret <= 0)
47946+ if (0 > expand_stack_downwards(bprm->vma, pos))
47947+ return NULL;
47948+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
47949 return NULL;
47950
47951 if (write) {
47952@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
47953 vma->vm_end = STACK_TOP_MAX;
47954 vma->vm_start = vma->vm_end - PAGE_SIZE;
47955 vma->vm_flags = VM_STACK_FLAGS;
47956+
47957+#ifdef CONFIG_PAX_SEGMEXEC
47958+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
47959+#endif
47960+
47961 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
47962
47963 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
47964@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
47965 mm->stack_vm = mm->total_vm = 1;
47966 up_write(&mm->mmap_sem);
47967 bprm->p = vma->vm_end - sizeof(void *);
47968+
47969+#ifdef CONFIG_PAX_RANDUSTACK
47970+ if (randomize_va_space)
47971+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
47972+#endif
47973+
47974 return 0;
47975 err:
47976 up_write(&mm->mmap_sem);
47977@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
47978 int r;
47979 mm_segment_t oldfs = get_fs();
47980 set_fs(KERNEL_DS);
47981- r = copy_strings(argc, (char __user * __user *)argv, bprm);
47982+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
47983 set_fs(oldfs);
47984 return r;
47985 }
47986@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
47987 unsigned long new_end = old_end - shift;
47988 struct mmu_gather *tlb;
47989
47990- BUG_ON(new_start > new_end);
47991+ if (new_start >= new_end || new_start < mmap_min_addr)
47992+ return -ENOMEM;
47993
47994 /*
47995 * ensure there are no vmas between where we want to go
47996@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
47997 if (vma != find_vma(mm, new_start))
47998 return -EFAULT;
47999
48000+#ifdef CONFIG_PAX_SEGMEXEC
48001+ BUG_ON(pax_find_mirror_vma(vma));
48002+#endif
48003+
48004 /*
48005 * cover the whole range: [new_start, old_end)
48006 */
48007@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48008 stack_top = arch_align_stack(stack_top);
48009 stack_top = PAGE_ALIGN(stack_top);
48010
48011- if (unlikely(stack_top < mmap_min_addr) ||
48012- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48013- return -ENOMEM;
48014-
48015 stack_shift = vma->vm_end - stack_top;
48016
48017 bprm->p -= stack_shift;
48018@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48019 bprm->exec -= stack_shift;
48020
48021 down_write(&mm->mmap_sem);
48022+
48023+ /* Move stack pages down in memory. */
48024+ if (stack_shift) {
48025+ ret = shift_arg_pages(vma, stack_shift);
48026+ if (ret)
48027+ goto out_unlock;
48028+ }
48029+
48030 vm_flags = VM_STACK_FLAGS;
48031
48032 /*
48033@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48034 vm_flags &= ~VM_EXEC;
48035 vm_flags |= mm->def_flags;
48036
48037+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48038+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48039+ vm_flags &= ~VM_EXEC;
48040+
48041+#ifdef CONFIG_PAX_MPROTECT
48042+ if (mm->pax_flags & MF_PAX_MPROTECT)
48043+ vm_flags &= ~VM_MAYEXEC;
48044+#endif
48045+
48046+ }
48047+#endif
48048+
48049 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48050 vm_flags);
48051 if (ret)
48052 goto out_unlock;
48053 BUG_ON(prev != vma);
48054
48055- /* Move stack pages down in memory. */
48056- if (stack_shift) {
48057- ret = shift_arg_pages(vma, stack_shift);
48058- if (ret)
48059- goto out_unlock;
48060- }
48061-
48062 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48063 stack_size = vma->vm_end - vma->vm_start;
48064 /*
48065@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_t offset,
48066 old_fs = get_fs();
48067 set_fs(get_ds());
48068 /* The cast to a user pointer is valid due to the set_fs() */
48069- result = vfs_read(file, (void __user *)addr, count, &pos);
48070+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
48071 set_fs(old_fs);
48072 return result;
48073 }
48074@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48075 }
48076 rcu_read_unlock();
48077
48078- if (p->fs->users > n_fs) {
48079+ if (atomic_read(&p->fs->users) > n_fs) {
48080 bprm->unsafe |= LSM_UNSAFE_SHARE;
48081 } else {
48082 res = -EAGAIN;
48083@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
48084 char __user *__user *envp,
48085 struct pt_regs * regs)
48086 {
48087+#ifdef CONFIG_GRKERNSEC
48088+ struct file *old_exec_file;
48089+ struct acl_subject_label *old_acl;
48090+ struct rlimit old_rlim[RLIM_NLIMITS];
48091+#endif
48092 struct linux_binprm *bprm;
48093 struct file *file;
48094 struct files_struct *displaced;
48095 bool clear_in_exec;
48096 int retval;
48097+ const struct cred *cred = current_cred();
48098+
48099+ /*
48100+ * We move the actual failure in case of RLIMIT_NPROC excess from
48101+ * set*uid() to execve() because too many poorly written programs
48102+ * don't check setuid() return code. Here we additionally recheck
48103+ * whether NPROC limit is still exceeded.
48104+ */
48105+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48106+
48107+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48108+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48109+ retval = -EAGAIN;
48110+ goto out_ret;
48111+ }
48112+
48113+ /* We're below the limit (still or again), so we don't want to make
48114+ * further execve() calls fail. */
48115+ current->flags &= ~PF_NPROC_EXCEEDED;
48116
48117 retval = unshare_files(&displaced);
48118 if (retval)
48119@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
48120 bprm->filename = filename;
48121 bprm->interp = filename;
48122
48123+ if (gr_process_user_ban()) {
48124+ retval = -EPERM;
48125+ goto out_file;
48126+ }
48127+
48128+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48129+ retval = -EACCES;
48130+ goto out_file;
48131+ }
48132+
48133 retval = bprm_mm_init(bprm);
48134 if (retval)
48135 goto out_file;
48136@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
48137 if (retval < 0)
48138 goto out;
48139
48140+ if (!gr_tpe_allow(file)) {
48141+ retval = -EACCES;
48142+ goto out;
48143+ }
48144+
48145+ if (gr_check_crash_exec(file)) {
48146+ retval = -EACCES;
48147+ goto out;
48148+ }
48149+
48150+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48151+
48152+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48153+
48154+#ifdef CONFIG_GRKERNSEC
48155+ old_acl = current->acl;
48156+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48157+ old_exec_file = current->exec_file;
48158+ get_file(file);
48159+ current->exec_file = file;
48160+#endif
48161+
48162+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48163+ bprm->unsafe & LSM_UNSAFE_SHARE);
48164+ if (retval < 0)
48165+ goto out_fail;
48166+
48167 current->flags &= ~PF_KTHREAD;
48168 retval = search_binary_handler(bprm,regs);
48169 if (retval < 0)
48170- goto out;
48171+ goto out_fail;
48172+#ifdef CONFIG_GRKERNSEC
48173+ if (old_exec_file)
48174+ fput(old_exec_file);
48175+#endif
48176
48177 /* execve succeeded */
48178 current->fs->in_exec = 0;
48179@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
48180 put_files_struct(displaced);
48181 return retval;
48182
48183+out_fail:
48184+#ifdef CONFIG_GRKERNSEC
48185+ current->acl = old_acl;
48186+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48187+ fput(current->exec_file);
48188+ current->exec_file = old_exec_file;
48189+#endif
48190+
48191 out:
48192 if (bprm->mm) {
48193 acct_arg_size(bprm, 0);
48194@@ -1591,6 +1693,220 @@ out:
48195 return ispipe;
48196 }
48197
48198+int pax_check_flags(unsigned long *flags)
48199+{
48200+ int retval = 0;
48201+
48202+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48203+ if (*flags & MF_PAX_SEGMEXEC)
48204+ {
48205+ *flags &= ~MF_PAX_SEGMEXEC;
48206+ retval = -EINVAL;
48207+ }
48208+#endif
48209+
48210+ if ((*flags & MF_PAX_PAGEEXEC)
48211+
48212+#ifdef CONFIG_PAX_PAGEEXEC
48213+ && (*flags & MF_PAX_SEGMEXEC)
48214+#endif
48215+
48216+ )
48217+ {
48218+ *flags &= ~MF_PAX_PAGEEXEC;
48219+ retval = -EINVAL;
48220+ }
48221+
48222+ if ((*flags & MF_PAX_MPROTECT)
48223+
48224+#ifdef CONFIG_PAX_MPROTECT
48225+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48226+#endif
48227+
48228+ )
48229+ {
48230+ *flags &= ~MF_PAX_MPROTECT;
48231+ retval = -EINVAL;
48232+ }
48233+
48234+ if ((*flags & MF_PAX_EMUTRAMP)
48235+
48236+#ifdef CONFIG_PAX_EMUTRAMP
48237+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48238+#endif
48239+
48240+ )
48241+ {
48242+ *flags &= ~MF_PAX_EMUTRAMP;
48243+ retval = -EINVAL;
48244+ }
48245+
48246+ return retval;
48247+}
48248+
48249+EXPORT_SYMBOL(pax_check_flags);
48250+
48251+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48252+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
48253+{
48254+ struct task_struct *tsk = current;
48255+ struct mm_struct *mm = current->mm;
48256+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
48257+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48258+ char *path_exec = NULL;
48259+ char *path_fault = NULL;
48260+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
48261+
48262+ if (buffer_exec && buffer_fault) {
48263+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48264+
48265+ down_read(&mm->mmap_sem);
48266+ vma = mm->mmap;
48267+ while (vma && (!vma_exec || !vma_fault)) {
48268+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
48269+ vma_exec = vma;
48270+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48271+ vma_fault = vma;
48272+ vma = vma->vm_next;
48273+ }
48274+ if (vma_exec) {
48275+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48276+ if (IS_ERR(path_exec))
48277+ path_exec = "<path too long>";
48278+ else {
48279+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48280+ if (path_exec) {
48281+ *path_exec = 0;
48282+ path_exec = buffer_exec;
48283+ } else
48284+ path_exec = "<path too long>";
48285+ }
48286+ }
48287+ if (vma_fault) {
48288+ start = vma_fault->vm_start;
48289+ end = vma_fault->vm_end;
48290+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48291+ if (vma_fault->vm_file) {
48292+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48293+ if (IS_ERR(path_fault))
48294+ path_fault = "<path too long>";
48295+ else {
48296+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48297+ if (path_fault) {
48298+ *path_fault = 0;
48299+ path_fault = buffer_fault;
48300+ } else
48301+ path_fault = "<path too long>";
48302+ }
48303+ } else
48304+ path_fault = "<anonymous mapping>";
48305+ }
48306+ up_read(&mm->mmap_sem);
48307+ }
48308+ if (tsk->signal->curr_ip)
48309+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48310+ else
48311+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48312+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
48313+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48314+ task_uid(tsk), task_euid(tsk), pc, sp);
48315+ free_page((unsigned long)buffer_exec);
48316+ free_page((unsigned long)buffer_fault);
48317+ pax_report_insns(regs, pc, sp);
48318+ do_coredump(SIGKILL, SIGKILL, regs);
48319+}
48320+#endif
48321+
48322+#ifdef CONFIG_PAX_REFCOUNT
48323+void pax_report_refcount_overflow(struct pt_regs *regs)
48324+{
48325+ if (current->signal->curr_ip)
48326+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48327+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
48328+ else
48329+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48330+ current->comm, task_pid_nr(current), current_uid(), current_euid());
48331+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48332+ show_regs(regs);
48333+ force_sig_specific(SIGKILL, current);
48334+}
48335+#endif
48336+
48337+#ifdef CONFIG_PAX_USERCOPY
48338+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48339+int object_is_on_stack(const void *obj, unsigned long len)
48340+{
48341+ const void * const stack = task_stack_page(current);
48342+ const void * const stackend = stack + THREAD_SIZE;
48343+
48344+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48345+ const void *frame = NULL;
48346+ const void *oldframe;
48347+#endif
48348+
48349+ if (obj + len < obj)
48350+ return -1;
48351+
48352+ if (obj + len <= stack || stackend <= obj)
48353+ return 0;
48354+
48355+ if (obj < stack || stackend < obj + len)
48356+ return -1;
48357+
48358+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48359+ oldframe = __builtin_frame_address(1);
48360+ if (oldframe)
48361+ frame = __builtin_frame_address(2);
48362+ /*
48363+ low ----------------------------------------------> high
48364+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
48365+ ^----------------^
48366+ allow copies only within here
48367+ */
48368+ while (stack <= frame && frame < stackend) {
48369+ /* if obj + len extends past the last frame, this
48370+ check won't pass and the next frame will be 0,
48371+ causing us to bail out and correctly report
48372+ the copy as invalid
48373+ */
48374+ if (obj + len <= frame)
48375+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48376+ oldframe = frame;
48377+ frame = *(const void * const *)frame;
48378+ }
48379+ return -1;
48380+#else
48381+ return 1;
48382+#endif
48383+}
48384+
48385+
48386+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
48387+{
48388+ if (current->signal->curr_ip)
48389+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48390+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48391+ else
48392+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48393+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48394+
48395+ dump_stack();
48396+ gr_handle_kernel_exploit();
48397+ do_group_exit(SIGKILL);
48398+}
48399+#endif
48400+
48401+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
48402+void pax_track_stack(void)
48403+{
48404+ unsigned long sp = (unsigned long)&sp;
48405+ if (sp < current_thread_info()->lowest_stack &&
48406+ sp > (unsigned long)task_stack_page(current))
48407+ current_thread_info()->lowest_stack = sp;
48408+}
48409+EXPORT_SYMBOL(pax_track_stack);
48410+#endif
48411+
48412 static int zap_process(struct task_struct *start)
48413 {
48414 struct task_struct *t;
48415@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct file *file)
48416 pipe = file->f_path.dentry->d_inode->i_pipe;
48417
48418 pipe_lock(pipe);
48419- pipe->readers++;
48420- pipe->writers--;
48421+ atomic_inc(&pipe->readers);
48422+ atomic_dec(&pipe->writers);
48423
48424- while ((pipe->readers > 1) && (!signal_pending(current))) {
48425+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
48426 wake_up_interruptible_sync(&pipe->wait);
48427 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48428 pipe_wait(pipe);
48429 }
48430
48431- pipe->readers--;
48432- pipe->writers++;
48433+ atomic_dec(&pipe->readers);
48434+ atomic_inc(&pipe->writers);
48435 pipe_unlock(pipe);
48436
48437 }
48438@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48439 char **helper_argv = NULL;
48440 int helper_argc = 0;
48441 int dump_count = 0;
48442- static atomic_t core_dump_count = ATOMIC_INIT(0);
48443+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
48444
48445 audit_core_dumps(signr);
48446
48447+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
48448+ gr_handle_brute_attach(current, mm->flags);
48449+
48450 binfmt = mm->binfmt;
48451 if (!binfmt || !binfmt->core_dump)
48452 goto fail;
48453@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48454 */
48455 clear_thread_flag(TIF_SIGPENDING);
48456
48457+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
48458+
48459 /*
48460 * lock_kernel() because format_corename() is controlled by sysctl, which
48461 * uses lock_kernel()
48462@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48463 goto fail_unlock;
48464 }
48465
48466- dump_count = atomic_inc_return(&core_dump_count);
48467+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
48468 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
48469 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
48470 task_tgid_vnr(current), current->comm);
48471@@ -1972,7 +2293,7 @@ close_fail:
48472 filp_close(file, NULL);
48473 fail_dropcount:
48474 if (dump_count)
48475- atomic_dec(&core_dump_count);
48476+ atomic_dec_unchecked(&core_dump_count);
48477 fail_unlock:
48478 if (helper_argv)
48479 argv_free(helper_argv);
48480diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
48481index 7f8d2e5..a1abdbb 100644
48482--- a/fs/ext2/balloc.c
48483+++ b/fs/ext2/balloc.c
48484@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
48485
48486 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48487 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48488- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48489+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
48490 sbi->s_resuid != current_fsuid() &&
48491 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
48492 return 0;
48493diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
48494index 27967f9..9f2a5fb 100644
48495--- a/fs/ext3/balloc.c
48496+++ b/fs/ext3/balloc.c
48497@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
48498
48499 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48500 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48501- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48502+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
48503 sbi->s_resuid != current_fsuid() &&
48504 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
48505 return 0;
48506diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
48507index e85b63c..80398e6 100644
48508--- a/fs/ext4/balloc.c
48509+++ b/fs/ext4/balloc.c
48510@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
48511 /* Hm, nope. Are (enough) root reserved blocks available? */
48512 if (sbi->s_resuid == current_fsuid() ||
48513 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
48514- capable(CAP_SYS_RESOURCE)) {
48515+ capable_nolog(CAP_SYS_RESOURCE)) {
48516 if (free_blocks >= (nblocks + dirty_blocks))
48517 return 1;
48518 }
48519diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
48520index 67c46ed..1f237e5 100644
48521--- a/fs/ext4/ext4.h
48522+++ b/fs/ext4/ext4.h
48523@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
48524
48525 /* stats for buddy allocator */
48526 spinlock_t s_mb_pa_lock;
48527- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
48528- atomic_t s_bal_success; /* we found long enough chunks */
48529- atomic_t s_bal_allocated; /* in blocks */
48530- atomic_t s_bal_ex_scanned; /* total extents scanned */
48531- atomic_t s_bal_goals; /* goal hits */
48532- atomic_t s_bal_breaks; /* too long searches */
48533- atomic_t s_bal_2orders; /* 2^order hits */
48534+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
48535+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
48536+ atomic_unchecked_t s_bal_allocated; /* in blocks */
48537+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
48538+ atomic_unchecked_t s_bal_goals; /* goal hits */
48539+ atomic_unchecked_t s_bal_breaks; /* too long searches */
48540+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
48541 spinlock_t s_bal_lock;
48542 unsigned long s_mb_buddies_generated;
48543 unsigned long long s_mb_generation_time;
48544- atomic_t s_mb_lost_chunks;
48545- atomic_t s_mb_preallocated;
48546- atomic_t s_mb_discarded;
48547+ atomic_unchecked_t s_mb_lost_chunks;
48548+ atomic_unchecked_t s_mb_preallocated;
48549+ atomic_unchecked_t s_mb_discarded;
48550 atomic_t s_lock_busy;
48551
48552 /* locality groups */
48553diff --git a/fs/ext4/file.c b/fs/ext4/file.c
48554index 2a60541..7439d61 100644
48555--- a/fs/ext4/file.c
48556+++ b/fs/ext4/file.c
48557@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
48558 cp = d_path(&path, buf, sizeof(buf));
48559 path_put(&path);
48560 if (!IS_ERR(cp)) {
48561- memcpy(sbi->s_es->s_last_mounted, cp,
48562- sizeof(sbi->s_es->s_last_mounted));
48563+ strlcpy(sbi->s_es->s_last_mounted, cp,
48564+ sizeof(sbi->s_es->s_last_mounted));
48565 sb->s_dirt = 1;
48566 }
48567 }
48568diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
48569index 42bac1b..0aab9d8 100644
48570--- a/fs/ext4/mballoc.c
48571+++ b/fs/ext4/mballoc.c
48572@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
48573 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
48574
48575 if (EXT4_SB(sb)->s_mb_stats)
48576- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
48577+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
48578
48579 break;
48580 }
48581@@ -2131,7 +2131,7 @@ repeat:
48582 ac->ac_status = AC_STATUS_CONTINUE;
48583 ac->ac_flags |= EXT4_MB_HINT_FIRST;
48584 cr = 3;
48585- atomic_inc(&sbi->s_mb_lost_chunks);
48586+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
48587 goto repeat;
48588 }
48589 }
48590@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
48591 ext4_grpblk_t counters[16];
48592 } sg;
48593
48594+ pax_track_stack();
48595+
48596 group--;
48597 if (group == 0)
48598 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
48599@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
48600 if (sbi->s_mb_stats) {
48601 printk(KERN_INFO
48602 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
48603- atomic_read(&sbi->s_bal_allocated),
48604- atomic_read(&sbi->s_bal_reqs),
48605- atomic_read(&sbi->s_bal_success));
48606+ atomic_read_unchecked(&sbi->s_bal_allocated),
48607+ atomic_read_unchecked(&sbi->s_bal_reqs),
48608+ atomic_read_unchecked(&sbi->s_bal_success));
48609 printk(KERN_INFO
48610 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
48611 "%u 2^N hits, %u breaks, %u lost\n",
48612- atomic_read(&sbi->s_bal_ex_scanned),
48613- atomic_read(&sbi->s_bal_goals),
48614- atomic_read(&sbi->s_bal_2orders),
48615- atomic_read(&sbi->s_bal_breaks),
48616- atomic_read(&sbi->s_mb_lost_chunks));
48617+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
48618+ atomic_read_unchecked(&sbi->s_bal_goals),
48619+ atomic_read_unchecked(&sbi->s_bal_2orders),
48620+ atomic_read_unchecked(&sbi->s_bal_breaks),
48621+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
48622 printk(KERN_INFO
48623 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
48624 sbi->s_mb_buddies_generated++,
48625 sbi->s_mb_generation_time);
48626 printk(KERN_INFO
48627 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
48628- atomic_read(&sbi->s_mb_preallocated),
48629- atomic_read(&sbi->s_mb_discarded));
48630+ atomic_read_unchecked(&sbi->s_mb_preallocated),
48631+ atomic_read_unchecked(&sbi->s_mb_discarded));
48632 }
48633
48634 free_percpu(sbi->s_locality_groups);
48635@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
48636 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
48637
48638 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
48639- atomic_inc(&sbi->s_bal_reqs);
48640- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48641+ atomic_inc_unchecked(&sbi->s_bal_reqs);
48642+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48643 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
48644- atomic_inc(&sbi->s_bal_success);
48645- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
48646+ atomic_inc_unchecked(&sbi->s_bal_success);
48647+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
48648 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
48649 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
48650- atomic_inc(&sbi->s_bal_goals);
48651+ atomic_inc_unchecked(&sbi->s_bal_goals);
48652 if (ac->ac_found > sbi->s_mb_max_to_scan)
48653- atomic_inc(&sbi->s_bal_breaks);
48654+ atomic_inc_unchecked(&sbi->s_bal_breaks);
48655 }
48656
48657 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
48658@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
48659 trace_ext4_mb_new_inode_pa(ac, pa);
48660
48661 ext4_mb_use_inode_pa(ac, pa);
48662- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48663+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48664
48665 ei = EXT4_I(ac->ac_inode);
48666 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48667@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
48668 trace_ext4_mb_new_group_pa(ac, pa);
48669
48670 ext4_mb_use_group_pa(ac, pa);
48671- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48672+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48673
48674 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48675 lg = ac->ac_lg;
48676@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
48677 * from the bitmap and continue.
48678 */
48679 }
48680- atomic_add(free, &sbi->s_mb_discarded);
48681+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
48682
48683 return err;
48684 }
48685@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
48686 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
48687 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
48688 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
48689- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48690+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48691
48692 if (ac) {
48693 ac->ac_sb = sb;
48694diff --git a/fs/ext4/super.c b/fs/ext4/super.c
48695index f27e045..be5a1c3 100644
48696--- a/fs/ext4/super.c
48697+++ b/fs/ext4/super.c
48698@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobject *kobj)
48699 }
48700
48701
48702-static struct sysfs_ops ext4_attr_ops = {
48703+static const struct sysfs_ops ext4_attr_ops = {
48704 .show = ext4_attr_show,
48705 .store = ext4_attr_store,
48706 };
48707diff --git a/fs/fcntl.c b/fs/fcntl.c
48708index 97e01dc..e9aab2d 100644
48709--- a/fs/fcntl.c
48710+++ b/fs/fcntl.c
48711@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
48712 if (err)
48713 return err;
48714
48715+ if (gr_handle_chroot_fowner(pid, type))
48716+ return -ENOENT;
48717+ if (gr_check_protected_task_fowner(pid, type))
48718+ return -EACCES;
48719+
48720 f_modown(filp, pid, type, force);
48721 return 0;
48722 }
48723@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
48724
48725 static int f_setown_ex(struct file *filp, unsigned long arg)
48726 {
48727- struct f_owner_ex * __user owner_p = (void * __user)arg;
48728+ struct f_owner_ex __user *owner_p = (void __user *)arg;
48729 struct f_owner_ex owner;
48730 struct pid *pid;
48731 int type;
48732@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
48733
48734 static int f_getown_ex(struct file *filp, unsigned long arg)
48735 {
48736- struct f_owner_ex * __user owner_p = (void * __user)arg;
48737+ struct f_owner_ex __user *owner_p = (void __user *)arg;
48738 struct f_owner_ex owner;
48739 int ret = 0;
48740
48741@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
48742 switch (cmd) {
48743 case F_DUPFD:
48744 case F_DUPFD_CLOEXEC:
48745+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
48746 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
48747 break;
48748 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
48749diff --git a/fs/fifo.c b/fs/fifo.c
48750index f8f97b8..b1f2259 100644
48751--- a/fs/fifo.c
48752+++ b/fs/fifo.c
48753@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
48754 */
48755 filp->f_op = &read_pipefifo_fops;
48756 pipe->r_counter++;
48757- if (pipe->readers++ == 0)
48758+ if (atomic_inc_return(&pipe->readers) == 1)
48759 wake_up_partner(inode);
48760
48761- if (!pipe->writers) {
48762+ if (!atomic_read(&pipe->writers)) {
48763 if ((filp->f_flags & O_NONBLOCK)) {
48764 /* suppress POLLHUP until we have
48765 * seen a writer */
48766@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
48767 * errno=ENXIO when there is no process reading the FIFO.
48768 */
48769 ret = -ENXIO;
48770- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
48771+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
48772 goto err;
48773
48774 filp->f_op = &write_pipefifo_fops;
48775 pipe->w_counter++;
48776- if (!pipe->writers++)
48777+ if (atomic_inc_return(&pipe->writers) == 1)
48778 wake_up_partner(inode);
48779
48780- if (!pipe->readers) {
48781+ if (!atomic_read(&pipe->readers)) {
48782 wait_for_partner(inode, &pipe->r_counter);
48783 if (signal_pending(current))
48784 goto err_wr;
48785@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
48786 */
48787 filp->f_op = &rdwr_pipefifo_fops;
48788
48789- pipe->readers++;
48790- pipe->writers++;
48791+ atomic_inc(&pipe->readers);
48792+ atomic_inc(&pipe->writers);
48793 pipe->r_counter++;
48794 pipe->w_counter++;
48795- if (pipe->readers == 1 || pipe->writers == 1)
48796+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
48797 wake_up_partner(inode);
48798 break;
48799
48800@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
48801 return 0;
48802
48803 err_rd:
48804- if (!--pipe->readers)
48805+ if (atomic_dec_and_test(&pipe->readers))
48806 wake_up_interruptible(&pipe->wait);
48807 ret = -ERESTARTSYS;
48808 goto err;
48809
48810 err_wr:
48811- if (!--pipe->writers)
48812+ if (atomic_dec_and_test(&pipe->writers))
48813 wake_up_interruptible(&pipe->wait);
48814 ret = -ERESTARTSYS;
48815 goto err;
48816
48817 err:
48818- if (!pipe->readers && !pipe->writers)
48819+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
48820 free_pipe_info(inode);
48821
48822 err_nocleanup:
48823diff --git a/fs/file.c b/fs/file.c
48824index 87e1290..a930cc4 100644
48825--- a/fs/file.c
48826+++ b/fs/file.c
48827@@ -14,6 +14,7 @@
48828 #include <linux/slab.h>
48829 #include <linux/vmalloc.h>
48830 #include <linux/file.h>
48831+#include <linux/security.h>
48832 #include <linux/fdtable.h>
48833 #include <linux/bitops.h>
48834 #include <linux/interrupt.h>
48835@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
48836 * N.B. For clone tasks sharing a files structure, this test
48837 * will limit the total number of files that can be opened.
48838 */
48839+
48840+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
48841 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
48842 return -EMFILE;
48843
48844diff --git a/fs/filesystems.c b/fs/filesystems.c
48845index a24c58e..53f91ee 100644
48846--- a/fs/filesystems.c
48847+++ b/fs/filesystems.c
48848@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
48849 int len = dot ? dot - name : strlen(name);
48850
48851 fs = __get_fs_type(name, len);
48852+
48853+#ifdef CONFIG_GRKERNSEC_MODHARDEN
48854+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
48855+#else
48856 if (!fs && (request_module("%.*s", len, name) == 0))
48857+#endif
48858 fs = __get_fs_type(name, len);
48859
48860 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
48861diff --git a/fs/fs_struct.c b/fs/fs_struct.c
48862index eee0590..ef5bc0e 100644
48863--- a/fs/fs_struct.c
48864+++ b/fs/fs_struct.c
48865@@ -4,6 +4,7 @@
48866 #include <linux/path.h>
48867 #include <linux/slab.h>
48868 #include <linux/fs_struct.h>
48869+#include <linux/grsecurity.h>
48870
48871 /*
48872 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
48873@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
48874 old_root = fs->root;
48875 fs->root = *path;
48876 path_get(path);
48877+ gr_set_chroot_entries(current, path);
48878 write_unlock(&fs->lock);
48879 if (old_root.dentry)
48880 path_put(&old_root);
48881@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
48882 && fs->root.mnt == old_root->mnt) {
48883 path_get(new_root);
48884 fs->root = *new_root;
48885+ gr_set_chroot_entries(p, new_root);
48886 count++;
48887 }
48888 if (fs->pwd.dentry == old_root->dentry
48889@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
48890 task_lock(tsk);
48891 write_lock(&fs->lock);
48892 tsk->fs = NULL;
48893- kill = !--fs->users;
48894+ gr_clear_chroot_entries(tsk);
48895+ kill = !atomic_dec_return(&fs->users);
48896 write_unlock(&fs->lock);
48897 task_unlock(tsk);
48898 if (kill)
48899@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
48900 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
48901 /* We don't need to lock fs - think why ;-) */
48902 if (fs) {
48903- fs->users = 1;
48904+ atomic_set(&fs->users, 1);
48905 fs->in_exec = 0;
48906 rwlock_init(&fs->lock);
48907 fs->umask = old->umask;
48908@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
48909
48910 task_lock(current);
48911 write_lock(&fs->lock);
48912- kill = !--fs->users;
48913+ kill = !atomic_dec_return(&fs->users);
48914 current->fs = new_fs;
48915+ gr_set_chroot_entries(current, &new_fs->root);
48916 write_unlock(&fs->lock);
48917 task_unlock(current);
48918
48919@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
48920
48921 /* to be mentioned only in INIT_TASK */
48922 struct fs_struct init_fs = {
48923- .users = 1,
48924+ .users = ATOMIC_INIT(1),
48925 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
48926 .umask = 0022,
48927 };
48928@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
48929 task_lock(current);
48930
48931 write_lock(&init_fs.lock);
48932- init_fs.users++;
48933+ atomic_inc(&init_fs.users);
48934 write_unlock(&init_fs.lock);
48935
48936 write_lock(&fs->lock);
48937 current->fs = &init_fs;
48938- kill = !--fs->users;
48939+ gr_set_chroot_entries(current, &current->fs->root);
48940+ kill = !atomic_dec_return(&fs->users);
48941 write_unlock(&fs->lock);
48942
48943 task_unlock(current);
48944diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
48945index 9905350..02eaec4 100644
48946--- a/fs/fscache/cookie.c
48947+++ b/fs/fscache/cookie.c
48948@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
48949 parent ? (char *) parent->def->name : "<no-parent>",
48950 def->name, netfs_data);
48951
48952- fscache_stat(&fscache_n_acquires);
48953+ fscache_stat_unchecked(&fscache_n_acquires);
48954
48955 /* if there's no parent cookie, then we don't create one here either */
48956 if (!parent) {
48957- fscache_stat(&fscache_n_acquires_null);
48958+ fscache_stat_unchecked(&fscache_n_acquires_null);
48959 _leave(" [no parent]");
48960 return NULL;
48961 }
48962@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
48963 /* allocate and initialise a cookie */
48964 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
48965 if (!cookie) {
48966- fscache_stat(&fscache_n_acquires_oom);
48967+ fscache_stat_unchecked(&fscache_n_acquires_oom);
48968 _leave(" [ENOMEM]");
48969 return NULL;
48970 }
48971@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
48972
48973 switch (cookie->def->type) {
48974 case FSCACHE_COOKIE_TYPE_INDEX:
48975- fscache_stat(&fscache_n_cookie_index);
48976+ fscache_stat_unchecked(&fscache_n_cookie_index);
48977 break;
48978 case FSCACHE_COOKIE_TYPE_DATAFILE:
48979- fscache_stat(&fscache_n_cookie_data);
48980+ fscache_stat_unchecked(&fscache_n_cookie_data);
48981 break;
48982 default:
48983- fscache_stat(&fscache_n_cookie_special);
48984+ fscache_stat_unchecked(&fscache_n_cookie_special);
48985 break;
48986 }
48987
48988@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
48989 if (fscache_acquire_non_index_cookie(cookie) < 0) {
48990 atomic_dec(&parent->n_children);
48991 __fscache_cookie_put(cookie);
48992- fscache_stat(&fscache_n_acquires_nobufs);
48993+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
48994 _leave(" = NULL");
48995 return NULL;
48996 }
48997 }
48998
48999- fscache_stat(&fscache_n_acquires_ok);
49000+ fscache_stat_unchecked(&fscache_n_acquires_ok);
49001 _leave(" = %p", cookie);
49002 return cookie;
49003 }
49004@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49005 cache = fscache_select_cache_for_object(cookie->parent);
49006 if (!cache) {
49007 up_read(&fscache_addremove_sem);
49008- fscache_stat(&fscache_n_acquires_no_cache);
49009+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49010 _leave(" = -ENOMEDIUM [no cache]");
49011 return -ENOMEDIUM;
49012 }
49013@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49014 object = cache->ops->alloc_object(cache, cookie);
49015 fscache_stat_d(&fscache_n_cop_alloc_object);
49016 if (IS_ERR(object)) {
49017- fscache_stat(&fscache_n_object_no_alloc);
49018+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
49019 ret = PTR_ERR(object);
49020 goto error;
49021 }
49022
49023- fscache_stat(&fscache_n_object_alloc);
49024+ fscache_stat_unchecked(&fscache_n_object_alloc);
49025
49026 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49027
49028@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49029 struct fscache_object *object;
49030 struct hlist_node *_p;
49031
49032- fscache_stat(&fscache_n_updates);
49033+ fscache_stat_unchecked(&fscache_n_updates);
49034
49035 if (!cookie) {
49036- fscache_stat(&fscache_n_updates_null);
49037+ fscache_stat_unchecked(&fscache_n_updates_null);
49038 _leave(" [no cookie]");
49039 return;
49040 }
49041@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49042 struct fscache_object *object;
49043 unsigned long event;
49044
49045- fscache_stat(&fscache_n_relinquishes);
49046+ fscache_stat_unchecked(&fscache_n_relinquishes);
49047 if (retire)
49048- fscache_stat(&fscache_n_relinquishes_retire);
49049+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49050
49051 if (!cookie) {
49052- fscache_stat(&fscache_n_relinquishes_null);
49053+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
49054 _leave(" [no cookie]");
49055 return;
49056 }
49057@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49058
49059 /* wait for the cookie to finish being instantiated (or to fail) */
49060 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49061- fscache_stat(&fscache_n_relinquishes_waitcrt);
49062+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49063 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49064 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49065 }
49066diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49067index edd7434..0725e66 100644
49068--- a/fs/fscache/internal.h
49069+++ b/fs/fscache/internal.h
49070@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49071 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49072 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49073
49074-extern atomic_t fscache_n_op_pend;
49075-extern atomic_t fscache_n_op_run;
49076-extern atomic_t fscache_n_op_enqueue;
49077-extern atomic_t fscache_n_op_deferred_release;
49078-extern atomic_t fscache_n_op_release;
49079-extern atomic_t fscache_n_op_gc;
49080-extern atomic_t fscache_n_op_cancelled;
49081-extern atomic_t fscache_n_op_rejected;
49082-
49083-extern atomic_t fscache_n_attr_changed;
49084-extern atomic_t fscache_n_attr_changed_ok;
49085-extern atomic_t fscache_n_attr_changed_nobufs;
49086-extern atomic_t fscache_n_attr_changed_nomem;
49087-extern atomic_t fscache_n_attr_changed_calls;
49088-
49089-extern atomic_t fscache_n_allocs;
49090-extern atomic_t fscache_n_allocs_ok;
49091-extern atomic_t fscache_n_allocs_wait;
49092-extern atomic_t fscache_n_allocs_nobufs;
49093-extern atomic_t fscache_n_allocs_intr;
49094-extern atomic_t fscache_n_allocs_object_dead;
49095-extern atomic_t fscache_n_alloc_ops;
49096-extern atomic_t fscache_n_alloc_op_waits;
49097-
49098-extern atomic_t fscache_n_retrievals;
49099-extern atomic_t fscache_n_retrievals_ok;
49100-extern atomic_t fscache_n_retrievals_wait;
49101-extern atomic_t fscache_n_retrievals_nodata;
49102-extern atomic_t fscache_n_retrievals_nobufs;
49103-extern atomic_t fscache_n_retrievals_intr;
49104-extern atomic_t fscache_n_retrievals_nomem;
49105-extern atomic_t fscache_n_retrievals_object_dead;
49106-extern atomic_t fscache_n_retrieval_ops;
49107-extern atomic_t fscache_n_retrieval_op_waits;
49108-
49109-extern atomic_t fscache_n_stores;
49110-extern atomic_t fscache_n_stores_ok;
49111-extern atomic_t fscache_n_stores_again;
49112-extern atomic_t fscache_n_stores_nobufs;
49113-extern atomic_t fscache_n_stores_oom;
49114-extern atomic_t fscache_n_store_ops;
49115-extern atomic_t fscache_n_store_calls;
49116-extern atomic_t fscache_n_store_pages;
49117-extern atomic_t fscache_n_store_radix_deletes;
49118-extern atomic_t fscache_n_store_pages_over_limit;
49119-
49120-extern atomic_t fscache_n_store_vmscan_not_storing;
49121-extern atomic_t fscache_n_store_vmscan_gone;
49122-extern atomic_t fscache_n_store_vmscan_busy;
49123-extern atomic_t fscache_n_store_vmscan_cancelled;
49124-
49125-extern atomic_t fscache_n_marks;
49126-extern atomic_t fscache_n_uncaches;
49127-
49128-extern atomic_t fscache_n_acquires;
49129-extern atomic_t fscache_n_acquires_null;
49130-extern atomic_t fscache_n_acquires_no_cache;
49131-extern atomic_t fscache_n_acquires_ok;
49132-extern atomic_t fscache_n_acquires_nobufs;
49133-extern atomic_t fscache_n_acquires_oom;
49134-
49135-extern atomic_t fscache_n_updates;
49136-extern atomic_t fscache_n_updates_null;
49137-extern atomic_t fscache_n_updates_run;
49138-
49139-extern atomic_t fscache_n_relinquishes;
49140-extern atomic_t fscache_n_relinquishes_null;
49141-extern atomic_t fscache_n_relinquishes_waitcrt;
49142-extern atomic_t fscache_n_relinquishes_retire;
49143-
49144-extern atomic_t fscache_n_cookie_index;
49145-extern atomic_t fscache_n_cookie_data;
49146-extern atomic_t fscache_n_cookie_special;
49147-
49148-extern atomic_t fscache_n_object_alloc;
49149-extern atomic_t fscache_n_object_no_alloc;
49150-extern atomic_t fscache_n_object_lookups;
49151-extern atomic_t fscache_n_object_lookups_negative;
49152-extern atomic_t fscache_n_object_lookups_positive;
49153-extern atomic_t fscache_n_object_lookups_timed_out;
49154-extern atomic_t fscache_n_object_created;
49155-extern atomic_t fscache_n_object_avail;
49156-extern atomic_t fscache_n_object_dead;
49157-
49158-extern atomic_t fscache_n_checkaux_none;
49159-extern atomic_t fscache_n_checkaux_okay;
49160-extern atomic_t fscache_n_checkaux_update;
49161-extern atomic_t fscache_n_checkaux_obsolete;
49162+extern atomic_unchecked_t fscache_n_op_pend;
49163+extern atomic_unchecked_t fscache_n_op_run;
49164+extern atomic_unchecked_t fscache_n_op_enqueue;
49165+extern atomic_unchecked_t fscache_n_op_deferred_release;
49166+extern atomic_unchecked_t fscache_n_op_release;
49167+extern atomic_unchecked_t fscache_n_op_gc;
49168+extern atomic_unchecked_t fscache_n_op_cancelled;
49169+extern atomic_unchecked_t fscache_n_op_rejected;
49170+
49171+extern atomic_unchecked_t fscache_n_attr_changed;
49172+extern atomic_unchecked_t fscache_n_attr_changed_ok;
49173+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49174+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49175+extern atomic_unchecked_t fscache_n_attr_changed_calls;
49176+
49177+extern atomic_unchecked_t fscache_n_allocs;
49178+extern atomic_unchecked_t fscache_n_allocs_ok;
49179+extern atomic_unchecked_t fscache_n_allocs_wait;
49180+extern atomic_unchecked_t fscache_n_allocs_nobufs;
49181+extern atomic_unchecked_t fscache_n_allocs_intr;
49182+extern atomic_unchecked_t fscache_n_allocs_object_dead;
49183+extern atomic_unchecked_t fscache_n_alloc_ops;
49184+extern atomic_unchecked_t fscache_n_alloc_op_waits;
49185+
49186+extern atomic_unchecked_t fscache_n_retrievals;
49187+extern atomic_unchecked_t fscache_n_retrievals_ok;
49188+extern atomic_unchecked_t fscache_n_retrievals_wait;
49189+extern atomic_unchecked_t fscache_n_retrievals_nodata;
49190+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49191+extern atomic_unchecked_t fscache_n_retrievals_intr;
49192+extern atomic_unchecked_t fscache_n_retrievals_nomem;
49193+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49194+extern atomic_unchecked_t fscache_n_retrieval_ops;
49195+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49196+
49197+extern atomic_unchecked_t fscache_n_stores;
49198+extern atomic_unchecked_t fscache_n_stores_ok;
49199+extern atomic_unchecked_t fscache_n_stores_again;
49200+extern atomic_unchecked_t fscache_n_stores_nobufs;
49201+extern atomic_unchecked_t fscache_n_stores_oom;
49202+extern atomic_unchecked_t fscache_n_store_ops;
49203+extern atomic_unchecked_t fscache_n_store_calls;
49204+extern atomic_unchecked_t fscache_n_store_pages;
49205+extern atomic_unchecked_t fscache_n_store_radix_deletes;
49206+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49207+
49208+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49209+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49210+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49211+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49212+
49213+extern atomic_unchecked_t fscache_n_marks;
49214+extern atomic_unchecked_t fscache_n_uncaches;
49215+
49216+extern atomic_unchecked_t fscache_n_acquires;
49217+extern atomic_unchecked_t fscache_n_acquires_null;
49218+extern atomic_unchecked_t fscache_n_acquires_no_cache;
49219+extern atomic_unchecked_t fscache_n_acquires_ok;
49220+extern atomic_unchecked_t fscache_n_acquires_nobufs;
49221+extern atomic_unchecked_t fscache_n_acquires_oom;
49222+
49223+extern atomic_unchecked_t fscache_n_updates;
49224+extern atomic_unchecked_t fscache_n_updates_null;
49225+extern atomic_unchecked_t fscache_n_updates_run;
49226+
49227+extern atomic_unchecked_t fscache_n_relinquishes;
49228+extern atomic_unchecked_t fscache_n_relinquishes_null;
49229+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49230+extern atomic_unchecked_t fscache_n_relinquishes_retire;
49231+
49232+extern atomic_unchecked_t fscache_n_cookie_index;
49233+extern atomic_unchecked_t fscache_n_cookie_data;
49234+extern atomic_unchecked_t fscache_n_cookie_special;
49235+
49236+extern atomic_unchecked_t fscache_n_object_alloc;
49237+extern atomic_unchecked_t fscache_n_object_no_alloc;
49238+extern atomic_unchecked_t fscache_n_object_lookups;
49239+extern atomic_unchecked_t fscache_n_object_lookups_negative;
49240+extern atomic_unchecked_t fscache_n_object_lookups_positive;
49241+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
49242+extern atomic_unchecked_t fscache_n_object_created;
49243+extern atomic_unchecked_t fscache_n_object_avail;
49244+extern atomic_unchecked_t fscache_n_object_dead;
49245+
49246+extern atomic_unchecked_t fscache_n_checkaux_none;
49247+extern atomic_unchecked_t fscache_n_checkaux_okay;
49248+extern atomic_unchecked_t fscache_n_checkaux_update;
49249+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
49250
49251 extern atomic_t fscache_n_cop_alloc_object;
49252 extern atomic_t fscache_n_cop_lookup_object;
49253@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
49254 atomic_inc(stat);
49255 }
49256
49257+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
49258+{
49259+ atomic_inc_unchecked(stat);
49260+}
49261+
49262 static inline void fscache_stat_d(atomic_t *stat)
49263 {
49264 atomic_dec(stat);
49265@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
49266
49267 #define __fscache_stat(stat) (NULL)
49268 #define fscache_stat(stat) do {} while (0)
49269+#define fscache_stat_unchecked(stat) do {} while (0)
49270 #define fscache_stat_d(stat) do {} while (0)
49271 #endif
49272
49273diff --git a/fs/fscache/object.c b/fs/fscache/object.c
49274index e513ac5..e888d34 100644
49275--- a/fs/fscache/object.c
49276+++ b/fs/fscache/object.c
49277@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49278 /* update the object metadata on disk */
49279 case FSCACHE_OBJECT_UPDATING:
49280 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49281- fscache_stat(&fscache_n_updates_run);
49282+ fscache_stat_unchecked(&fscache_n_updates_run);
49283 fscache_stat(&fscache_n_cop_update_object);
49284 object->cache->ops->update_object(object);
49285 fscache_stat_d(&fscache_n_cop_update_object);
49286@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49287 spin_lock(&object->lock);
49288 object->state = FSCACHE_OBJECT_DEAD;
49289 spin_unlock(&object->lock);
49290- fscache_stat(&fscache_n_object_dead);
49291+ fscache_stat_unchecked(&fscache_n_object_dead);
49292 goto terminal_transit;
49293
49294 /* handle the parent cache of this object being withdrawn from
49295@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49296 spin_lock(&object->lock);
49297 object->state = FSCACHE_OBJECT_DEAD;
49298 spin_unlock(&object->lock);
49299- fscache_stat(&fscache_n_object_dead);
49300+ fscache_stat_unchecked(&fscache_n_object_dead);
49301 goto terminal_transit;
49302
49303 /* complain about the object being woken up once it is
49304@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49305 parent->cookie->def->name, cookie->def->name,
49306 object->cache->tag->name);
49307
49308- fscache_stat(&fscache_n_object_lookups);
49309+ fscache_stat_unchecked(&fscache_n_object_lookups);
49310 fscache_stat(&fscache_n_cop_lookup_object);
49311 ret = object->cache->ops->lookup_object(object);
49312 fscache_stat_d(&fscache_n_cop_lookup_object);
49313@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49314 if (ret == -ETIMEDOUT) {
49315 /* probably stuck behind another object, so move this one to
49316 * the back of the queue */
49317- fscache_stat(&fscache_n_object_lookups_timed_out);
49318+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49319 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49320 }
49321
49322@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49323
49324 spin_lock(&object->lock);
49325 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49326- fscache_stat(&fscache_n_object_lookups_negative);
49327+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49328
49329 /* transit here to allow write requests to begin stacking up
49330 * and read requests to begin returning ENODATA */
49331@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
49332 * result, in which case there may be data available */
49333 spin_lock(&object->lock);
49334 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49335- fscache_stat(&fscache_n_object_lookups_positive);
49336+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49337
49338 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49339
49340@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
49341 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49342 } else {
49343 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49344- fscache_stat(&fscache_n_object_created);
49345+ fscache_stat_unchecked(&fscache_n_object_created);
49346
49347 object->state = FSCACHE_OBJECT_AVAILABLE;
49348 spin_unlock(&object->lock);
49349@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
49350 fscache_enqueue_dependents(object);
49351
49352 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49353- fscache_stat(&fscache_n_object_avail);
49354+ fscache_stat_unchecked(&fscache_n_object_avail);
49355
49356 _leave("");
49357 }
49358@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49359 enum fscache_checkaux result;
49360
49361 if (!object->cookie->def->check_aux) {
49362- fscache_stat(&fscache_n_checkaux_none);
49363+ fscache_stat_unchecked(&fscache_n_checkaux_none);
49364 return FSCACHE_CHECKAUX_OKAY;
49365 }
49366
49367@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49368 switch (result) {
49369 /* entry okay as is */
49370 case FSCACHE_CHECKAUX_OKAY:
49371- fscache_stat(&fscache_n_checkaux_okay);
49372+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
49373 break;
49374
49375 /* entry requires update */
49376 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49377- fscache_stat(&fscache_n_checkaux_update);
49378+ fscache_stat_unchecked(&fscache_n_checkaux_update);
49379 break;
49380
49381 /* entry requires deletion */
49382 case FSCACHE_CHECKAUX_OBSOLETE:
49383- fscache_stat(&fscache_n_checkaux_obsolete);
49384+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49385 break;
49386
49387 default:
49388diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49389index 313e79a..775240f 100644
49390--- a/fs/fscache/operation.c
49391+++ b/fs/fscache/operation.c
49392@@ -16,7 +16,7 @@
49393 #include <linux/seq_file.h>
49394 #include "internal.h"
49395
49396-atomic_t fscache_op_debug_id;
49397+atomic_unchecked_t fscache_op_debug_id;
49398 EXPORT_SYMBOL(fscache_op_debug_id);
49399
49400 /**
49401@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
49402 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
49403 ASSERTCMP(atomic_read(&op->usage), >, 0);
49404
49405- fscache_stat(&fscache_n_op_enqueue);
49406+ fscache_stat_unchecked(&fscache_n_op_enqueue);
49407 switch (op->flags & FSCACHE_OP_TYPE) {
49408 case FSCACHE_OP_FAST:
49409 _debug("queue fast");
49410@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
49411 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
49412 if (op->processor)
49413 fscache_enqueue_operation(op);
49414- fscache_stat(&fscache_n_op_run);
49415+ fscache_stat_unchecked(&fscache_n_op_run);
49416 }
49417
49418 /*
49419@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49420 if (object->n_ops > 0) {
49421 atomic_inc(&op->usage);
49422 list_add_tail(&op->pend_link, &object->pending_ops);
49423- fscache_stat(&fscache_n_op_pend);
49424+ fscache_stat_unchecked(&fscache_n_op_pend);
49425 } else if (!list_empty(&object->pending_ops)) {
49426 atomic_inc(&op->usage);
49427 list_add_tail(&op->pend_link, &object->pending_ops);
49428- fscache_stat(&fscache_n_op_pend);
49429+ fscache_stat_unchecked(&fscache_n_op_pend);
49430 fscache_start_operations(object);
49431 } else {
49432 ASSERTCMP(object->n_in_progress, ==, 0);
49433@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49434 object->n_exclusive++; /* reads and writes must wait */
49435 atomic_inc(&op->usage);
49436 list_add_tail(&op->pend_link, &object->pending_ops);
49437- fscache_stat(&fscache_n_op_pend);
49438+ fscache_stat_unchecked(&fscache_n_op_pend);
49439 ret = 0;
49440 } else {
49441 /* not allowed to submit ops in any other state */
49442@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
49443 if (object->n_exclusive > 0) {
49444 atomic_inc(&op->usage);
49445 list_add_tail(&op->pend_link, &object->pending_ops);
49446- fscache_stat(&fscache_n_op_pend);
49447+ fscache_stat_unchecked(&fscache_n_op_pend);
49448 } else if (!list_empty(&object->pending_ops)) {
49449 atomic_inc(&op->usage);
49450 list_add_tail(&op->pend_link, &object->pending_ops);
49451- fscache_stat(&fscache_n_op_pend);
49452+ fscache_stat_unchecked(&fscache_n_op_pend);
49453 fscache_start_operations(object);
49454 } else {
49455 ASSERTCMP(object->n_exclusive, ==, 0);
49456@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
49457 object->n_ops++;
49458 atomic_inc(&op->usage);
49459 list_add_tail(&op->pend_link, &object->pending_ops);
49460- fscache_stat(&fscache_n_op_pend);
49461+ fscache_stat_unchecked(&fscache_n_op_pend);
49462 ret = 0;
49463 } else if (object->state == FSCACHE_OBJECT_DYING ||
49464 object->state == FSCACHE_OBJECT_LC_DYING ||
49465 object->state == FSCACHE_OBJECT_WITHDRAWING) {
49466- fscache_stat(&fscache_n_op_rejected);
49467+ fscache_stat_unchecked(&fscache_n_op_rejected);
49468 ret = -ENOBUFS;
49469 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
49470 fscache_report_unexpected_submission(object, op, ostate);
49471@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
49472
49473 ret = -EBUSY;
49474 if (!list_empty(&op->pend_link)) {
49475- fscache_stat(&fscache_n_op_cancelled);
49476+ fscache_stat_unchecked(&fscache_n_op_cancelled);
49477 list_del_init(&op->pend_link);
49478 object->n_ops--;
49479 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
49480@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
49481 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
49482 BUG();
49483
49484- fscache_stat(&fscache_n_op_release);
49485+ fscache_stat_unchecked(&fscache_n_op_release);
49486
49487 if (op->release) {
49488 op->release(op);
49489@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
49490 * lock, and defer it otherwise */
49491 if (!spin_trylock(&object->lock)) {
49492 _debug("defer put");
49493- fscache_stat(&fscache_n_op_deferred_release);
49494+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
49495
49496 cache = object->cache;
49497 spin_lock(&cache->op_gc_list_lock);
49498@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
49499
49500 _debug("GC DEFERRED REL OBJ%x OP%x",
49501 object->debug_id, op->debug_id);
49502- fscache_stat(&fscache_n_op_gc);
49503+ fscache_stat_unchecked(&fscache_n_op_gc);
49504
49505 ASSERTCMP(atomic_read(&op->usage), ==, 0);
49506
49507diff --git a/fs/fscache/page.c b/fs/fscache/page.c
49508index c598ea4..6aac13e 100644
49509--- a/fs/fscache/page.c
49510+++ b/fs/fscache/page.c
49511@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
49512 val = radix_tree_lookup(&cookie->stores, page->index);
49513 if (!val) {
49514 rcu_read_unlock();
49515- fscache_stat(&fscache_n_store_vmscan_not_storing);
49516+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
49517 __fscache_uncache_page(cookie, page);
49518 return true;
49519 }
49520@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
49521 spin_unlock(&cookie->stores_lock);
49522
49523 if (xpage) {
49524- fscache_stat(&fscache_n_store_vmscan_cancelled);
49525- fscache_stat(&fscache_n_store_radix_deletes);
49526+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
49527+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49528 ASSERTCMP(xpage, ==, page);
49529 } else {
49530- fscache_stat(&fscache_n_store_vmscan_gone);
49531+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
49532 }
49533
49534 wake_up_bit(&cookie->flags, 0);
49535@@ -106,7 +106,7 @@ page_busy:
49536 /* we might want to wait here, but that could deadlock the allocator as
49537 * the slow-work threads writing to the cache may all end up sleeping
49538 * on memory allocation */
49539- fscache_stat(&fscache_n_store_vmscan_busy);
49540+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
49541 return false;
49542 }
49543 EXPORT_SYMBOL(__fscache_maybe_release_page);
49544@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
49545 FSCACHE_COOKIE_STORING_TAG);
49546 if (!radix_tree_tag_get(&cookie->stores, page->index,
49547 FSCACHE_COOKIE_PENDING_TAG)) {
49548- fscache_stat(&fscache_n_store_radix_deletes);
49549+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49550 xpage = radix_tree_delete(&cookie->stores, page->index);
49551 }
49552 spin_unlock(&cookie->stores_lock);
49553@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
49554
49555 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
49556
49557- fscache_stat(&fscache_n_attr_changed_calls);
49558+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
49559
49560 if (fscache_object_is_active(object)) {
49561 fscache_set_op_state(op, "CallFS");
49562@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49563
49564 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49565
49566- fscache_stat(&fscache_n_attr_changed);
49567+ fscache_stat_unchecked(&fscache_n_attr_changed);
49568
49569 op = kzalloc(sizeof(*op), GFP_KERNEL);
49570 if (!op) {
49571- fscache_stat(&fscache_n_attr_changed_nomem);
49572+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
49573 _leave(" = -ENOMEM");
49574 return -ENOMEM;
49575 }
49576@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49577 if (fscache_submit_exclusive_op(object, op) < 0)
49578 goto nobufs;
49579 spin_unlock(&cookie->lock);
49580- fscache_stat(&fscache_n_attr_changed_ok);
49581+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
49582 fscache_put_operation(op);
49583 _leave(" = 0");
49584 return 0;
49585@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49586 nobufs:
49587 spin_unlock(&cookie->lock);
49588 kfree(op);
49589- fscache_stat(&fscache_n_attr_changed_nobufs);
49590+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
49591 _leave(" = %d", -ENOBUFS);
49592 return -ENOBUFS;
49593 }
49594@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
49595 /* allocate a retrieval operation and attempt to submit it */
49596 op = kzalloc(sizeof(*op), GFP_NOIO);
49597 if (!op) {
49598- fscache_stat(&fscache_n_retrievals_nomem);
49599+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49600 return NULL;
49601 }
49602
49603@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49604 return 0;
49605 }
49606
49607- fscache_stat(&fscache_n_retrievals_wait);
49608+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
49609
49610 jif = jiffies;
49611 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
49612 fscache_wait_bit_interruptible,
49613 TASK_INTERRUPTIBLE) != 0) {
49614- fscache_stat(&fscache_n_retrievals_intr);
49615+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49616 _leave(" = -ERESTARTSYS");
49617 return -ERESTARTSYS;
49618 }
49619@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49620 */
49621 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49622 struct fscache_retrieval *op,
49623- atomic_t *stat_op_waits,
49624- atomic_t *stat_object_dead)
49625+ atomic_unchecked_t *stat_op_waits,
49626+ atomic_unchecked_t *stat_object_dead)
49627 {
49628 int ret;
49629
49630@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49631 goto check_if_dead;
49632
49633 _debug(">>> WT");
49634- fscache_stat(stat_op_waits);
49635+ fscache_stat_unchecked(stat_op_waits);
49636 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
49637 fscache_wait_bit_interruptible,
49638 TASK_INTERRUPTIBLE) < 0) {
49639@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49640
49641 check_if_dead:
49642 if (unlikely(fscache_object_is_dead(object))) {
49643- fscache_stat(stat_object_dead);
49644+ fscache_stat_unchecked(stat_object_dead);
49645 return -ENOBUFS;
49646 }
49647 return 0;
49648@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49649
49650 _enter("%p,%p,,,", cookie, page);
49651
49652- fscache_stat(&fscache_n_retrievals);
49653+ fscache_stat_unchecked(&fscache_n_retrievals);
49654
49655 if (hlist_empty(&cookie->backing_objects))
49656 goto nobufs;
49657@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49658 goto nobufs_unlock;
49659 spin_unlock(&cookie->lock);
49660
49661- fscache_stat(&fscache_n_retrieval_ops);
49662+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
49663
49664 /* pin the netfs read context in case we need to do the actual netfs
49665 * read because we've encountered a cache read failure */
49666@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49667
49668 error:
49669 if (ret == -ENOMEM)
49670- fscache_stat(&fscache_n_retrievals_nomem);
49671+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49672 else if (ret == -ERESTARTSYS)
49673- fscache_stat(&fscache_n_retrievals_intr);
49674+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49675 else if (ret == -ENODATA)
49676- fscache_stat(&fscache_n_retrievals_nodata);
49677+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49678 else if (ret < 0)
49679- fscache_stat(&fscache_n_retrievals_nobufs);
49680+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49681 else
49682- fscache_stat(&fscache_n_retrievals_ok);
49683+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
49684
49685 fscache_put_retrieval(op);
49686 _leave(" = %d", ret);
49687@@ -453,7 +453,7 @@ nobufs_unlock:
49688 spin_unlock(&cookie->lock);
49689 kfree(op);
49690 nobufs:
49691- fscache_stat(&fscache_n_retrievals_nobufs);
49692+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49693 _leave(" = -ENOBUFS");
49694 return -ENOBUFS;
49695 }
49696@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49697
49698 _enter("%p,,%d,,,", cookie, *nr_pages);
49699
49700- fscache_stat(&fscache_n_retrievals);
49701+ fscache_stat_unchecked(&fscache_n_retrievals);
49702
49703 if (hlist_empty(&cookie->backing_objects))
49704 goto nobufs;
49705@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49706 goto nobufs_unlock;
49707 spin_unlock(&cookie->lock);
49708
49709- fscache_stat(&fscache_n_retrieval_ops);
49710+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
49711
49712 /* pin the netfs read context in case we need to do the actual netfs
49713 * read because we've encountered a cache read failure */
49714@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49715
49716 error:
49717 if (ret == -ENOMEM)
49718- fscache_stat(&fscache_n_retrievals_nomem);
49719+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49720 else if (ret == -ERESTARTSYS)
49721- fscache_stat(&fscache_n_retrievals_intr);
49722+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
49723 else if (ret == -ENODATA)
49724- fscache_stat(&fscache_n_retrievals_nodata);
49725+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49726 else if (ret < 0)
49727- fscache_stat(&fscache_n_retrievals_nobufs);
49728+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49729 else
49730- fscache_stat(&fscache_n_retrievals_ok);
49731+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
49732
49733 fscache_put_retrieval(op);
49734 _leave(" = %d", ret);
49735@@ -570,7 +570,7 @@ nobufs_unlock:
49736 spin_unlock(&cookie->lock);
49737 kfree(op);
49738 nobufs:
49739- fscache_stat(&fscache_n_retrievals_nobufs);
49740+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49741 _leave(" = -ENOBUFS");
49742 return -ENOBUFS;
49743 }
49744@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49745
49746 _enter("%p,%p,,,", cookie, page);
49747
49748- fscache_stat(&fscache_n_allocs);
49749+ fscache_stat_unchecked(&fscache_n_allocs);
49750
49751 if (hlist_empty(&cookie->backing_objects))
49752 goto nobufs;
49753@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49754 goto nobufs_unlock;
49755 spin_unlock(&cookie->lock);
49756
49757- fscache_stat(&fscache_n_alloc_ops);
49758+ fscache_stat_unchecked(&fscache_n_alloc_ops);
49759
49760 ret = fscache_wait_for_retrieval_activation(
49761 object, op,
49762@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
49763
49764 error:
49765 if (ret == -ERESTARTSYS)
49766- fscache_stat(&fscache_n_allocs_intr);
49767+ fscache_stat_unchecked(&fscache_n_allocs_intr);
49768 else if (ret < 0)
49769- fscache_stat(&fscache_n_allocs_nobufs);
49770+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
49771 else
49772- fscache_stat(&fscache_n_allocs_ok);
49773+ fscache_stat_unchecked(&fscache_n_allocs_ok);
49774
49775 fscache_put_retrieval(op);
49776 _leave(" = %d", ret);
49777@@ -651,7 +651,7 @@ nobufs_unlock:
49778 spin_unlock(&cookie->lock);
49779 kfree(op);
49780 nobufs:
49781- fscache_stat(&fscache_n_allocs_nobufs);
49782+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
49783 _leave(" = -ENOBUFS");
49784 return -ENOBUFS;
49785 }
49786@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49787
49788 spin_lock(&cookie->stores_lock);
49789
49790- fscache_stat(&fscache_n_store_calls);
49791+ fscache_stat_unchecked(&fscache_n_store_calls);
49792
49793 /* find a page to store */
49794 page = NULL;
49795@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49796 page = results[0];
49797 _debug("gang %d [%lx]", n, page->index);
49798 if (page->index > op->store_limit) {
49799- fscache_stat(&fscache_n_store_pages_over_limit);
49800+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
49801 goto superseded;
49802 }
49803
49804@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
49805
49806 if (page) {
49807 fscache_set_op_state(&op->op, "Store");
49808- fscache_stat(&fscache_n_store_pages);
49809+ fscache_stat_unchecked(&fscache_n_store_pages);
49810 fscache_stat(&fscache_n_cop_write_page);
49811 ret = object->cache->ops->write_page(op, page);
49812 fscache_stat_d(&fscache_n_cop_write_page);
49813@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49814 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49815 ASSERT(PageFsCache(page));
49816
49817- fscache_stat(&fscache_n_stores);
49818+ fscache_stat_unchecked(&fscache_n_stores);
49819
49820 op = kzalloc(sizeof(*op), GFP_NOIO);
49821 if (!op)
49822@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49823 spin_unlock(&cookie->stores_lock);
49824 spin_unlock(&object->lock);
49825
49826- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
49827+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
49828 op->store_limit = object->store_limit;
49829
49830 if (fscache_submit_op(object, &op->op) < 0)
49831@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49832
49833 spin_unlock(&cookie->lock);
49834 radix_tree_preload_end();
49835- fscache_stat(&fscache_n_store_ops);
49836- fscache_stat(&fscache_n_stores_ok);
49837+ fscache_stat_unchecked(&fscache_n_store_ops);
49838+ fscache_stat_unchecked(&fscache_n_stores_ok);
49839
49840 /* the slow work queue now carries its own ref on the object */
49841 fscache_put_operation(&op->op);
49842@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
49843 return 0;
49844
49845 already_queued:
49846- fscache_stat(&fscache_n_stores_again);
49847+ fscache_stat_unchecked(&fscache_n_stores_again);
49848 already_pending:
49849 spin_unlock(&cookie->stores_lock);
49850 spin_unlock(&object->lock);
49851 spin_unlock(&cookie->lock);
49852 radix_tree_preload_end();
49853 kfree(op);
49854- fscache_stat(&fscache_n_stores_ok);
49855+ fscache_stat_unchecked(&fscache_n_stores_ok);
49856 _leave(" = 0");
49857 return 0;
49858
49859@@ -886,14 +886,14 @@ nobufs:
49860 spin_unlock(&cookie->lock);
49861 radix_tree_preload_end();
49862 kfree(op);
49863- fscache_stat(&fscache_n_stores_nobufs);
49864+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
49865 _leave(" = -ENOBUFS");
49866 return -ENOBUFS;
49867
49868 nomem_free:
49869 kfree(op);
49870 nomem:
49871- fscache_stat(&fscache_n_stores_oom);
49872+ fscache_stat_unchecked(&fscache_n_stores_oom);
49873 _leave(" = -ENOMEM");
49874 return -ENOMEM;
49875 }
49876@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
49877 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49878 ASSERTCMP(page, !=, NULL);
49879
49880- fscache_stat(&fscache_n_uncaches);
49881+ fscache_stat_unchecked(&fscache_n_uncaches);
49882
49883 /* cache withdrawal may beat us to it */
49884 if (!PageFsCache(page))
49885@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
49886 unsigned long loop;
49887
49888 #ifdef CONFIG_FSCACHE_STATS
49889- atomic_add(pagevec->nr, &fscache_n_marks);
49890+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
49891 #endif
49892
49893 for (loop = 0; loop < pagevec->nr; loop++) {
49894diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
49895index 46435f3..8cddf18 100644
49896--- a/fs/fscache/stats.c
49897+++ b/fs/fscache/stats.c
49898@@ -18,95 +18,95 @@
49899 /*
49900 * operation counters
49901 */
49902-atomic_t fscache_n_op_pend;
49903-atomic_t fscache_n_op_run;
49904-atomic_t fscache_n_op_enqueue;
49905-atomic_t fscache_n_op_requeue;
49906-atomic_t fscache_n_op_deferred_release;
49907-atomic_t fscache_n_op_release;
49908-atomic_t fscache_n_op_gc;
49909-atomic_t fscache_n_op_cancelled;
49910-atomic_t fscache_n_op_rejected;
49911-
49912-atomic_t fscache_n_attr_changed;
49913-atomic_t fscache_n_attr_changed_ok;
49914-atomic_t fscache_n_attr_changed_nobufs;
49915-atomic_t fscache_n_attr_changed_nomem;
49916-atomic_t fscache_n_attr_changed_calls;
49917-
49918-atomic_t fscache_n_allocs;
49919-atomic_t fscache_n_allocs_ok;
49920-atomic_t fscache_n_allocs_wait;
49921-atomic_t fscache_n_allocs_nobufs;
49922-atomic_t fscache_n_allocs_intr;
49923-atomic_t fscache_n_allocs_object_dead;
49924-atomic_t fscache_n_alloc_ops;
49925-atomic_t fscache_n_alloc_op_waits;
49926-
49927-atomic_t fscache_n_retrievals;
49928-atomic_t fscache_n_retrievals_ok;
49929-atomic_t fscache_n_retrievals_wait;
49930-atomic_t fscache_n_retrievals_nodata;
49931-atomic_t fscache_n_retrievals_nobufs;
49932-atomic_t fscache_n_retrievals_intr;
49933-atomic_t fscache_n_retrievals_nomem;
49934-atomic_t fscache_n_retrievals_object_dead;
49935-atomic_t fscache_n_retrieval_ops;
49936-atomic_t fscache_n_retrieval_op_waits;
49937-
49938-atomic_t fscache_n_stores;
49939-atomic_t fscache_n_stores_ok;
49940-atomic_t fscache_n_stores_again;
49941-atomic_t fscache_n_stores_nobufs;
49942-atomic_t fscache_n_stores_oom;
49943-atomic_t fscache_n_store_ops;
49944-atomic_t fscache_n_store_calls;
49945-atomic_t fscache_n_store_pages;
49946-atomic_t fscache_n_store_radix_deletes;
49947-atomic_t fscache_n_store_pages_over_limit;
49948-
49949-atomic_t fscache_n_store_vmscan_not_storing;
49950-atomic_t fscache_n_store_vmscan_gone;
49951-atomic_t fscache_n_store_vmscan_busy;
49952-atomic_t fscache_n_store_vmscan_cancelled;
49953-
49954-atomic_t fscache_n_marks;
49955-atomic_t fscache_n_uncaches;
49956-
49957-atomic_t fscache_n_acquires;
49958-atomic_t fscache_n_acquires_null;
49959-atomic_t fscache_n_acquires_no_cache;
49960-atomic_t fscache_n_acquires_ok;
49961-atomic_t fscache_n_acquires_nobufs;
49962-atomic_t fscache_n_acquires_oom;
49963-
49964-atomic_t fscache_n_updates;
49965-atomic_t fscache_n_updates_null;
49966-atomic_t fscache_n_updates_run;
49967-
49968-atomic_t fscache_n_relinquishes;
49969-atomic_t fscache_n_relinquishes_null;
49970-atomic_t fscache_n_relinquishes_waitcrt;
49971-atomic_t fscache_n_relinquishes_retire;
49972-
49973-atomic_t fscache_n_cookie_index;
49974-atomic_t fscache_n_cookie_data;
49975-atomic_t fscache_n_cookie_special;
49976-
49977-atomic_t fscache_n_object_alloc;
49978-atomic_t fscache_n_object_no_alloc;
49979-atomic_t fscache_n_object_lookups;
49980-atomic_t fscache_n_object_lookups_negative;
49981-atomic_t fscache_n_object_lookups_positive;
49982-atomic_t fscache_n_object_lookups_timed_out;
49983-atomic_t fscache_n_object_created;
49984-atomic_t fscache_n_object_avail;
49985-atomic_t fscache_n_object_dead;
49986-
49987-atomic_t fscache_n_checkaux_none;
49988-atomic_t fscache_n_checkaux_okay;
49989-atomic_t fscache_n_checkaux_update;
49990-atomic_t fscache_n_checkaux_obsolete;
49991+atomic_unchecked_t fscache_n_op_pend;
49992+atomic_unchecked_t fscache_n_op_run;
49993+atomic_unchecked_t fscache_n_op_enqueue;
49994+atomic_unchecked_t fscache_n_op_requeue;
49995+atomic_unchecked_t fscache_n_op_deferred_release;
49996+atomic_unchecked_t fscache_n_op_release;
49997+atomic_unchecked_t fscache_n_op_gc;
49998+atomic_unchecked_t fscache_n_op_cancelled;
49999+atomic_unchecked_t fscache_n_op_rejected;
50000+
50001+atomic_unchecked_t fscache_n_attr_changed;
50002+atomic_unchecked_t fscache_n_attr_changed_ok;
50003+atomic_unchecked_t fscache_n_attr_changed_nobufs;
50004+atomic_unchecked_t fscache_n_attr_changed_nomem;
50005+atomic_unchecked_t fscache_n_attr_changed_calls;
50006+
50007+atomic_unchecked_t fscache_n_allocs;
50008+atomic_unchecked_t fscache_n_allocs_ok;
50009+atomic_unchecked_t fscache_n_allocs_wait;
50010+atomic_unchecked_t fscache_n_allocs_nobufs;
50011+atomic_unchecked_t fscache_n_allocs_intr;
50012+atomic_unchecked_t fscache_n_allocs_object_dead;
50013+atomic_unchecked_t fscache_n_alloc_ops;
50014+atomic_unchecked_t fscache_n_alloc_op_waits;
50015+
50016+atomic_unchecked_t fscache_n_retrievals;
50017+atomic_unchecked_t fscache_n_retrievals_ok;
50018+atomic_unchecked_t fscache_n_retrievals_wait;
50019+atomic_unchecked_t fscache_n_retrievals_nodata;
50020+atomic_unchecked_t fscache_n_retrievals_nobufs;
50021+atomic_unchecked_t fscache_n_retrievals_intr;
50022+atomic_unchecked_t fscache_n_retrievals_nomem;
50023+atomic_unchecked_t fscache_n_retrievals_object_dead;
50024+atomic_unchecked_t fscache_n_retrieval_ops;
50025+atomic_unchecked_t fscache_n_retrieval_op_waits;
50026+
50027+atomic_unchecked_t fscache_n_stores;
50028+atomic_unchecked_t fscache_n_stores_ok;
50029+atomic_unchecked_t fscache_n_stores_again;
50030+atomic_unchecked_t fscache_n_stores_nobufs;
50031+atomic_unchecked_t fscache_n_stores_oom;
50032+atomic_unchecked_t fscache_n_store_ops;
50033+atomic_unchecked_t fscache_n_store_calls;
50034+atomic_unchecked_t fscache_n_store_pages;
50035+atomic_unchecked_t fscache_n_store_radix_deletes;
50036+atomic_unchecked_t fscache_n_store_pages_over_limit;
50037+
50038+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50039+atomic_unchecked_t fscache_n_store_vmscan_gone;
50040+atomic_unchecked_t fscache_n_store_vmscan_busy;
50041+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50042+
50043+atomic_unchecked_t fscache_n_marks;
50044+atomic_unchecked_t fscache_n_uncaches;
50045+
50046+atomic_unchecked_t fscache_n_acquires;
50047+atomic_unchecked_t fscache_n_acquires_null;
50048+atomic_unchecked_t fscache_n_acquires_no_cache;
50049+atomic_unchecked_t fscache_n_acquires_ok;
50050+atomic_unchecked_t fscache_n_acquires_nobufs;
50051+atomic_unchecked_t fscache_n_acquires_oom;
50052+
50053+atomic_unchecked_t fscache_n_updates;
50054+atomic_unchecked_t fscache_n_updates_null;
50055+atomic_unchecked_t fscache_n_updates_run;
50056+
50057+atomic_unchecked_t fscache_n_relinquishes;
50058+atomic_unchecked_t fscache_n_relinquishes_null;
50059+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50060+atomic_unchecked_t fscache_n_relinquishes_retire;
50061+
50062+atomic_unchecked_t fscache_n_cookie_index;
50063+atomic_unchecked_t fscache_n_cookie_data;
50064+atomic_unchecked_t fscache_n_cookie_special;
50065+
50066+atomic_unchecked_t fscache_n_object_alloc;
50067+atomic_unchecked_t fscache_n_object_no_alloc;
50068+atomic_unchecked_t fscache_n_object_lookups;
50069+atomic_unchecked_t fscache_n_object_lookups_negative;
50070+atomic_unchecked_t fscache_n_object_lookups_positive;
50071+atomic_unchecked_t fscache_n_object_lookups_timed_out;
50072+atomic_unchecked_t fscache_n_object_created;
50073+atomic_unchecked_t fscache_n_object_avail;
50074+atomic_unchecked_t fscache_n_object_dead;
50075+
50076+atomic_unchecked_t fscache_n_checkaux_none;
50077+atomic_unchecked_t fscache_n_checkaux_okay;
50078+atomic_unchecked_t fscache_n_checkaux_update;
50079+atomic_unchecked_t fscache_n_checkaux_obsolete;
50080
50081 atomic_t fscache_n_cop_alloc_object;
50082 atomic_t fscache_n_cop_lookup_object;
50083@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50084 seq_puts(m, "FS-Cache statistics\n");
50085
50086 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50087- atomic_read(&fscache_n_cookie_index),
50088- atomic_read(&fscache_n_cookie_data),
50089- atomic_read(&fscache_n_cookie_special));
50090+ atomic_read_unchecked(&fscache_n_cookie_index),
50091+ atomic_read_unchecked(&fscache_n_cookie_data),
50092+ atomic_read_unchecked(&fscache_n_cookie_special));
50093
50094 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50095- atomic_read(&fscache_n_object_alloc),
50096- atomic_read(&fscache_n_object_no_alloc),
50097- atomic_read(&fscache_n_object_avail),
50098- atomic_read(&fscache_n_object_dead));
50099+ atomic_read_unchecked(&fscache_n_object_alloc),
50100+ atomic_read_unchecked(&fscache_n_object_no_alloc),
50101+ atomic_read_unchecked(&fscache_n_object_avail),
50102+ atomic_read_unchecked(&fscache_n_object_dead));
50103 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50104- atomic_read(&fscache_n_checkaux_none),
50105- atomic_read(&fscache_n_checkaux_okay),
50106- atomic_read(&fscache_n_checkaux_update),
50107- atomic_read(&fscache_n_checkaux_obsolete));
50108+ atomic_read_unchecked(&fscache_n_checkaux_none),
50109+ atomic_read_unchecked(&fscache_n_checkaux_okay),
50110+ atomic_read_unchecked(&fscache_n_checkaux_update),
50111+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50112
50113 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50114- atomic_read(&fscache_n_marks),
50115- atomic_read(&fscache_n_uncaches));
50116+ atomic_read_unchecked(&fscache_n_marks),
50117+ atomic_read_unchecked(&fscache_n_uncaches));
50118
50119 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50120 " oom=%u\n",
50121- atomic_read(&fscache_n_acquires),
50122- atomic_read(&fscache_n_acquires_null),
50123- atomic_read(&fscache_n_acquires_no_cache),
50124- atomic_read(&fscache_n_acquires_ok),
50125- atomic_read(&fscache_n_acquires_nobufs),
50126- atomic_read(&fscache_n_acquires_oom));
50127+ atomic_read_unchecked(&fscache_n_acquires),
50128+ atomic_read_unchecked(&fscache_n_acquires_null),
50129+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
50130+ atomic_read_unchecked(&fscache_n_acquires_ok),
50131+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
50132+ atomic_read_unchecked(&fscache_n_acquires_oom));
50133
50134 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50135- atomic_read(&fscache_n_object_lookups),
50136- atomic_read(&fscache_n_object_lookups_negative),
50137- atomic_read(&fscache_n_object_lookups_positive),
50138- atomic_read(&fscache_n_object_lookups_timed_out),
50139- atomic_read(&fscache_n_object_created));
50140+ atomic_read_unchecked(&fscache_n_object_lookups),
50141+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
50142+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
50143+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50144+ atomic_read_unchecked(&fscache_n_object_created));
50145
50146 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50147- atomic_read(&fscache_n_updates),
50148- atomic_read(&fscache_n_updates_null),
50149- atomic_read(&fscache_n_updates_run));
50150+ atomic_read_unchecked(&fscache_n_updates),
50151+ atomic_read_unchecked(&fscache_n_updates_null),
50152+ atomic_read_unchecked(&fscache_n_updates_run));
50153
50154 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50155- atomic_read(&fscache_n_relinquishes),
50156- atomic_read(&fscache_n_relinquishes_null),
50157- atomic_read(&fscache_n_relinquishes_waitcrt),
50158- atomic_read(&fscache_n_relinquishes_retire));
50159+ atomic_read_unchecked(&fscache_n_relinquishes),
50160+ atomic_read_unchecked(&fscache_n_relinquishes_null),
50161+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50162+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
50163
50164 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50165- atomic_read(&fscache_n_attr_changed),
50166- atomic_read(&fscache_n_attr_changed_ok),
50167- atomic_read(&fscache_n_attr_changed_nobufs),
50168- atomic_read(&fscache_n_attr_changed_nomem),
50169- atomic_read(&fscache_n_attr_changed_calls));
50170+ atomic_read_unchecked(&fscache_n_attr_changed),
50171+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
50172+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50173+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50174+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
50175
50176 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50177- atomic_read(&fscache_n_allocs),
50178- atomic_read(&fscache_n_allocs_ok),
50179- atomic_read(&fscache_n_allocs_wait),
50180- atomic_read(&fscache_n_allocs_nobufs),
50181- atomic_read(&fscache_n_allocs_intr));
50182+ atomic_read_unchecked(&fscache_n_allocs),
50183+ atomic_read_unchecked(&fscache_n_allocs_ok),
50184+ atomic_read_unchecked(&fscache_n_allocs_wait),
50185+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
50186+ atomic_read_unchecked(&fscache_n_allocs_intr));
50187 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50188- atomic_read(&fscache_n_alloc_ops),
50189- atomic_read(&fscache_n_alloc_op_waits),
50190- atomic_read(&fscache_n_allocs_object_dead));
50191+ atomic_read_unchecked(&fscache_n_alloc_ops),
50192+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
50193+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
50194
50195 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50196 " int=%u oom=%u\n",
50197- atomic_read(&fscache_n_retrievals),
50198- atomic_read(&fscache_n_retrievals_ok),
50199- atomic_read(&fscache_n_retrievals_wait),
50200- atomic_read(&fscache_n_retrievals_nodata),
50201- atomic_read(&fscache_n_retrievals_nobufs),
50202- atomic_read(&fscache_n_retrievals_intr),
50203- atomic_read(&fscache_n_retrievals_nomem));
50204+ atomic_read_unchecked(&fscache_n_retrievals),
50205+ atomic_read_unchecked(&fscache_n_retrievals_ok),
50206+ atomic_read_unchecked(&fscache_n_retrievals_wait),
50207+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
50208+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50209+ atomic_read_unchecked(&fscache_n_retrievals_intr),
50210+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
50211 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50212- atomic_read(&fscache_n_retrieval_ops),
50213- atomic_read(&fscache_n_retrieval_op_waits),
50214- atomic_read(&fscache_n_retrievals_object_dead));
50215+ atomic_read_unchecked(&fscache_n_retrieval_ops),
50216+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50217+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50218
50219 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50220- atomic_read(&fscache_n_stores),
50221- atomic_read(&fscache_n_stores_ok),
50222- atomic_read(&fscache_n_stores_again),
50223- atomic_read(&fscache_n_stores_nobufs),
50224- atomic_read(&fscache_n_stores_oom));
50225+ atomic_read_unchecked(&fscache_n_stores),
50226+ atomic_read_unchecked(&fscache_n_stores_ok),
50227+ atomic_read_unchecked(&fscache_n_stores_again),
50228+ atomic_read_unchecked(&fscache_n_stores_nobufs),
50229+ atomic_read_unchecked(&fscache_n_stores_oom));
50230 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50231- atomic_read(&fscache_n_store_ops),
50232- atomic_read(&fscache_n_store_calls),
50233- atomic_read(&fscache_n_store_pages),
50234- atomic_read(&fscache_n_store_radix_deletes),
50235- atomic_read(&fscache_n_store_pages_over_limit));
50236+ atomic_read_unchecked(&fscache_n_store_ops),
50237+ atomic_read_unchecked(&fscache_n_store_calls),
50238+ atomic_read_unchecked(&fscache_n_store_pages),
50239+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
50240+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50241
50242 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
50243- atomic_read(&fscache_n_store_vmscan_not_storing),
50244- atomic_read(&fscache_n_store_vmscan_gone),
50245- atomic_read(&fscache_n_store_vmscan_busy),
50246- atomic_read(&fscache_n_store_vmscan_cancelled));
50247+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
50248+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50249+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50250+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
50251
50252 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50253- atomic_read(&fscache_n_op_pend),
50254- atomic_read(&fscache_n_op_run),
50255- atomic_read(&fscache_n_op_enqueue),
50256- atomic_read(&fscache_n_op_cancelled),
50257- atomic_read(&fscache_n_op_rejected));
50258+ atomic_read_unchecked(&fscache_n_op_pend),
50259+ atomic_read_unchecked(&fscache_n_op_run),
50260+ atomic_read_unchecked(&fscache_n_op_enqueue),
50261+ atomic_read_unchecked(&fscache_n_op_cancelled),
50262+ atomic_read_unchecked(&fscache_n_op_rejected));
50263 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50264- atomic_read(&fscache_n_op_deferred_release),
50265- atomic_read(&fscache_n_op_release),
50266- atomic_read(&fscache_n_op_gc));
50267+ atomic_read_unchecked(&fscache_n_op_deferred_release),
50268+ atomic_read_unchecked(&fscache_n_op_release),
50269+ atomic_read_unchecked(&fscache_n_op_gc));
50270
50271 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50272 atomic_read(&fscache_n_cop_alloc_object),
50273diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50274index de792dc..448b532 100644
50275--- a/fs/fuse/cuse.c
50276+++ b/fs/fuse/cuse.c
50277@@ -576,10 +576,12 @@ static int __init cuse_init(void)
50278 INIT_LIST_HEAD(&cuse_conntbl[i]);
50279
50280 /* inherit and extend fuse_dev_operations */
50281- cuse_channel_fops = fuse_dev_operations;
50282- cuse_channel_fops.owner = THIS_MODULE;
50283- cuse_channel_fops.open = cuse_channel_open;
50284- cuse_channel_fops.release = cuse_channel_release;
50285+ pax_open_kernel();
50286+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50287+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50288+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
50289+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
50290+ pax_close_kernel();
50291
50292 cuse_class = class_create(THIS_MODULE, "cuse");
50293 if (IS_ERR(cuse_class))
50294diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50295index 1facb39..7f48557 100644
50296--- a/fs/fuse/dev.c
50297+++ b/fs/fuse/dev.c
50298@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50299 {
50300 struct fuse_notify_inval_entry_out outarg;
50301 int err = -EINVAL;
50302- char buf[FUSE_NAME_MAX+1];
50303+ char *buf = NULL;
50304 struct qstr name;
50305
50306 if (size < sizeof(outarg))
50307@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50308 if (outarg.namelen > FUSE_NAME_MAX)
50309 goto err;
50310
50311+ err = -ENOMEM;
50312+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
50313+ if (!buf)
50314+ goto err;
50315+
50316 err = -EINVAL;
50317 if (size != sizeof(outarg) + outarg.namelen + 1)
50318 goto err;
50319@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50320
50321 down_read(&fc->killsb);
50322 err = -ENOENT;
50323- if (!fc->sb)
50324- goto err_unlock;
50325-
50326- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50327-
50328-err_unlock:
50329+ if (fc->sb)
50330+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50331 up_read(&fc->killsb);
50332+ kfree(buf);
50333 return err;
50334
50335 err:
50336 fuse_copy_finish(cs);
50337+ kfree(buf);
50338 return err;
50339 }
50340
50341diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50342index 4787ae6..73efff7 100644
50343--- a/fs/fuse/dir.c
50344+++ b/fs/fuse/dir.c
50345@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
50346 return link;
50347 }
50348
50349-static void free_link(char *link)
50350+static void free_link(const char *link)
50351 {
50352 if (!IS_ERR(link))
50353 free_page((unsigned long) link);
50354diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
50355index 247436c..e650ccb 100644
50356--- a/fs/gfs2/ops_inode.c
50357+++ b/fs/gfs2/ops_inode.c
50358@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
50359 unsigned int x;
50360 int error;
50361
50362+ pax_track_stack();
50363+
50364 if (ndentry->d_inode) {
50365 nip = GFS2_I(ndentry->d_inode);
50366 if (ip == nip)
50367diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
50368index 4463297..4fed53b 100644
50369--- a/fs/gfs2/sys.c
50370+++ b/fs/gfs2/sys.c
50371@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
50372 return a->store ? a->store(sdp, buf, len) : len;
50373 }
50374
50375-static struct sysfs_ops gfs2_attr_ops = {
50376+static const struct sysfs_ops gfs2_attr_ops = {
50377 .show = gfs2_attr_show,
50378 .store = gfs2_attr_store,
50379 };
50380@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
50381 return 0;
50382 }
50383
50384-static struct kset_uevent_ops gfs2_uevent_ops = {
50385+static const struct kset_uevent_ops gfs2_uevent_ops = {
50386 .uevent = gfs2_uevent,
50387 };
50388
50389diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
50390index 052f214..2462c5b 100644
50391--- a/fs/hfs/btree.c
50392+++ b/fs/hfs/btree.c
50393@@ -45,11 +45,27 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
50394 case HFS_EXT_CNID:
50395 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
50396 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
50397+
50398+ if (HFS_I(tree->inode)->alloc_blocks >
50399+ HFS_I(tree->inode)->first_blocks) {
50400+ printk(KERN_ERR "hfs: invalid btree extent records\n");
50401+ unlock_new_inode(tree->inode);
50402+ goto free_inode;
50403+ }
50404+
50405 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
50406 break;
50407 case HFS_CAT_CNID:
50408 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
50409 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
50410+
50411+ if (!HFS_I(tree->inode)->first_blocks) {
50412+ printk(KERN_ERR "hfs: invalid btree extent records "
50413+ "(0 size).\n");
50414+ unlock_new_inode(tree->inode);
50415+ goto free_inode;
50416+ }
50417+
50418 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
50419 break;
50420 default:
50421@@ -58,11 +74,6 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
50422 }
50423 unlock_new_inode(tree->inode);
50424
50425- if (!HFS_I(tree->inode)->first_blocks) {
50426- printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
50427- goto free_inode;
50428- }
50429-
50430 mapping = tree->inode->i_mapping;
50431 page = read_mapping_page(mapping, 0, NULL);
50432 if (IS_ERR(page))
50433diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
50434index f6874ac..7cd98a8 100644
50435--- a/fs/hfsplus/catalog.c
50436+++ b/fs/hfsplus/catalog.c
50437@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
50438 int err;
50439 u16 type;
50440
50441+ pax_track_stack();
50442+
50443 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
50444 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
50445 if (err)
50446@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
50447 int entry_size;
50448 int err;
50449
50450+ pax_track_stack();
50451+
50452 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
50453 sb = dir->i_sb;
50454 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
50455@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
50456 int entry_size, type;
50457 int err = 0;
50458
50459+ pax_track_stack();
50460+
50461 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
50462 dst_dir->i_ino, dst_name->name);
50463 sb = src_dir->i_sb;
50464diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
50465index 5f40236..dac3421 100644
50466--- a/fs/hfsplus/dir.c
50467+++ b/fs/hfsplus/dir.c
50468@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
50469 struct hfsplus_readdir_data *rd;
50470 u16 type;
50471
50472+ pax_track_stack();
50473+
50474 if (filp->f_pos >= inode->i_size)
50475 return 0;
50476
50477diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
50478index 1bcf597..905a251 100644
50479--- a/fs/hfsplus/inode.c
50480+++ b/fs/hfsplus/inode.c
50481@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
50482 int res = 0;
50483 u16 type;
50484
50485+ pax_track_stack();
50486+
50487 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
50488
50489 HFSPLUS_I(inode).dev = 0;
50490@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
50491 struct hfs_find_data fd;
50492 hfsplus_cat_entry entry;
50493
50494+ pax_track_stack();
50495+
50496 if (HFSPLUS_IS_RSRC(inode))
50497 main_inode = HFSPLUS_I(inode).rsrc_inode;
50498
50499diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
50500index f457d2c..7ef4ad5 100644
50501--- a/fs/hfsplus/ioctl.c
50502+++ b/fs/hfsplus/ioctl.c
50503@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
50504 struct hfsplus_cat_file *file;
50505 int res;
50506
50507+ pax_track_stack();
50508+
50509 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50510 return -EOPNOTSUPP;
50511
50512@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
50513 struct hfsplus_cat_file *file;
50514 ssize_t res = 0;
50515
50516+ pax_track_stack();
50517+
50518 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50519 return -EOPNOTSUPP;
50520
50521diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
50522index 43022f3..7298079 100644
50523--- a/fs/hfsplus/super.c
50524+++ b/fs/hfsplus/super.c
50525@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
50526 struct nls_table *nls = NULL;
50527 int err = -EINVAL;
50528
50529+ pax_track_stack();
50530+
50531 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
50532 if (!sbi)
50533 return -ENOMEM;
50534diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
50535index 87a1258..5694d91 100644
50536--- a/fs/hugetlbfs/inode.c
50537+++ b/fs/hugetlbfs/inode.c
50538@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
50539 .kill_sb = kill_litter_super,
50540 };
50541
50542-static struct vfsmount *hugetlbfs_vfsmount;
50543+struct vfsmount *hugetlbfs_vfsmount;
50544
50545 static int can_do_hugetlb_shm(void)
50546 {
50547diff --git a/fs/ioctl.c b/fs/ioctl.c
50548index 6c75110..19d2c3c 100644
50549--- a/fs/ioctl.c
50550+++ b/fs/ioctl.c
50551@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
50552 u64 phys, u64 len, u32 flags)
50553 {
50554 struct fiemap_extent extent;
50555- struct fiemap_extent *dest = fieinfo->fi_extents_start;
50556+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
50557
50558 /* only count the extents */
50559 if (fieinfo->fi_extents_max == 0) {
50560@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
50561
50562 fieinfo.fi_flags = fiemap.fm_flags;
50563 fieinfo.fi_extents_max = fiemap.fm_extent_count;
50564- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
50565+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
50566
50567 if (fiemap.fm_extent_count != 0 &&
50568 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
50569@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
50570 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
50571 fiemap.fm_flags = fieinfo.fi_flags;
50572 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
50573- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
50574+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
50575 error = -EFAULT;
50576
50577 return error;
50578diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
50579index b0435dd..81ee0be 100644
50580--- a/fs/jbd/checkpoint.c
50581+++ b/fs/jbd/checkpoint.c
50582@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
50583 tid_t this_tid;
50584 int result;
50585
50586+ pax_track_stack();
50587+
50588 jbd_debug(1, "Start checkpoint\n");
50589
50590 /*
50591diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
50592index 546d153..736896c 100644
50593--- a/fs/jffs2/compr_rtime.c
50594+++ b/fs/jffs2/compr_rtime.c
50595@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
50596 int outpos = 0;
50597 int pos=0;
50598
50599+ pax_track_stack();
50600+
50601 memset(positions,0,sizeof(positions));
50602
50603 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
50604@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
50605 int outpos = 0;
50606 int pos=0;
50607
50608+ pax_track_stack();
50609+
50610 memset(positions,0,sizeof(positions));
50611
50612 while (outpos<destlen) {
50613diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
50614index 170d289..3254b98 100644
50615--- a/fs/jffs2/compr_rubin.c
50616+++ b/fs/jffs2/compr_rubin.c
50617@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
50618 int ret;
50619 uint32_t mysrclen, mydstlen;
50620
50621+ pax_track_stack();
50622+
50623 mysrclen = *sourcelen;
50624 mydstlen = *dstlen - 8;
50625
50626diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
50627index b47679b..00d65d3 100644
50628--- a/fs/jffs2/erase.c
50629+++ b/fs/jffs2/erase.c
50630@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
50631 struct jffs2_unknown_node marker = {
50632 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
50633 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50634- .totlen = cpu_to_je32(c->cleanmarker_size)
50635+ .totlen = cpu_to_je32(c->cleanmarker_size),
50636+ .hdr_crc = cpu_to_je32(0)
50637 };
50638
50639 jffs2_prealloc_raw_node_refs(c, jeb, 1);
50640diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
50641index 5ef7bac..4fd1e3c 100644
50642--- a/fs/jffs2/wbuf.c
50643+++ b/fs/jffs2/wbuf.c
50644@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
50645 {
50646 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
50647 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50648- .totlen = constant_cpu_to_je32(8)
50649+ .totlen = constant_cpu_to_je32(8),
50650+ .hdr_crc = constant_cpu_to_je32(0)
50651 };
50652
50653 /*
50654diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
50655index 082e844..52012a1 100644
50656--- a/fs/jffs2/xattr.c
50657+++ b/fs/jffs2/xattr.c
50658@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
50659
50660 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
50661
50662+ pax_track_stack();
50663+
50664 /* Phase.1 : Merge same xref */
50665 for (i=0; i < XREF_TMPHASH_SIZE; i++)
50666 xref_tmphash[i] = NULL;
50667diff --git a/fs/jfs/super.c b/fs/jfs/super.c
50668index 2234c73..f6e6e6b 100644
50669--- a/fs/jfs/super.c
50670+++ b/fs/jfs/super.c
50671@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
50672
50673 jfs_inode_cachep =
50674 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
50675- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
50676+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
50677 init_once);
50678 if (jfs_inode_cachep == NULL)
50679 return -ENOMEM;
50680diff --git a/fs/libfs.c b/fs/libfs.c
50681index ba36e93..3153fce 100644
50682--- a/fs/libfs.c
50683+++ b/fs/libfs.c
50684@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
50685
50686 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
50687 struct dentry *next;
50688+ char d_name[sizeof(next->d_iname)];
50689+ const unsigned char *name;
50690+
50691 next = list_entry(p, struct dentry, d_u.d_child);
50692 if (d_unhashed(next) || !next->d_inode)
50693 continue;
50694
50695 spin_unlock(&dcache_lock);
50696- if (filldir(dirent, next->d_name.name,
50697+ name = next->d_name.name;
50698+ if (name == next->d_iname) {
50699+ memcpy(d_name, name, next->d_name.len);
50700+ name = d_name;
50701+ }
50702+ if (filldir(dirent, name,
50703 next->d_name.len, filp->f_pos,
50704 next->d_inode->i_ino,
50705 dt_type(next->d_inode)) < 0)
50706diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
50707index c325a83..d15b07b 100644
50708--- a/fs/lockd/clntproc.c
50709+++ b/fs/lockd/clntproc.c
50710@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
50711 /*
50712 * Cookie counter for NLM requests
50713 */
50714-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
50715+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
50716
50717 void nlmclnt_next_cookie(struct nlm_cookie *c)
50718 {
50719- u32 cookie = atomic_inc_return(&nlm_cookie);
50720+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
50721
50722 memcpy(c->data, &cookie, 4);
50723 c->len=4;
50724@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
50725 struct nlm_rqst reqst, *req;
50726 int status;
50727
50728+ pax_track_stack();
50729+
50730 req = &reqst;
50731 memset(req, 0, sizeof(*req));
50732 locks_init_lock(&req->a_args.lock.fl);
50733diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
50734index 1a54ae1..6a16c27 100644
50735--- a/fs/lockd/svc.c
50736+++ b/fs/lockd/svc.c
50737@@ -43,7 +43,7 @@
50738
50739 static struct svc_program nlmsvc_program;
50740
50741-struct nlmsvc_binding * nlmsvc_ops;
50742+const struct nlmsvc_binding * nlmsvc_ops;
50743 EXPORT_SYMBOL_GPL(nlmsvc_ops);
50744
50745 static DEFINE_MUTEX(nlmsvc_mutex);
50746diff --git a/fs/locks.c b/fs/locks.c
50747index a8794f2..4041e55 100644
50748--- a/fs/locks.c
50749+++ b/fs/locks.c
50750@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
50751
50752 static struct kmem_cache *filelock_cache __read_mostly;
50753
50754+static void locks_init_lock_always(struct file_lock *fl)
50755+{
50756+ fl->fl_next = NULL;
50757+ fl->fl_fasync = NULL;
50758+ fl->fl_owner = NULL;
50759+ fl->fl_pid = 0;
50760+ fl->fl_nspid = NULL;
50761+ fl->fl_file = NULL;
50762+ fl->fl_flags = 0;
50763+ fl->fl_type = 0;
50764+ fl->fl_start = fl->fl_end = 0;
50765+}
50766+
50767 /* Allocate an empty lock structure. */
50768 static struct file_lock *locks_alloc_lock(void)
50769 {
50770- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
50771+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
50772+
50773+ if (fl)
50774+ locks_init_lock_always(fl);
50775+
50776+ return fl;
50777 }
50778
50779 void locks_release_private(struct file_lock *fl)
50780@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
50781 INIT_LIST_HEAD(&fl->fl_link);
50782 INIT_LIST_HEAD(&fl->fl_block);
50783 init_waitqueue_head(&fl->fl_wait);
50784- fl->fl_next = NULL;
50785- fl->fl_fasync = NULL;
50786- fl->fl_owner = NULL;
50787- fl->fl_pid = 0;
50788- fl->fl_nspid = NULL;
50789- fl->fl_file = NULL;
50790- fl->fl_flags = 0;
50791- fl->fl_type = 0;
50792- fl->fl_start = fl->fl_end = 0;
50793 fl->fl_ops = NULL;
50794 fl->fl_lmops = NULL;
50795+ locks_init_lock_always(fl);
50796 }
50797
50798 EXPORT_SYMBOL(locks_init_lock);
50799@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
50800 return;
50801
50802 if (filp->f_op && filp->f_op->flock) {
50803- struct file_lock fl = {
50804+ struct file_lock flock = {
50805 .fl_pid = current->tgid,
50806 .fl_file = filp,
50807 .fl_flags = FL_FLOCK,
50808 .fl_type = F_UNLCK,
50809 .fl_end = OFFSET_MAX,
50810 };
50811- filp->f_op->flock(filp, F_SETLKW, &fl);
50812- if (fl.fl_ops && fl.fl_ops->fl_release_private)
50813- fl.fl_ops->fl_release_private(&fl);
50814+ filp->f_op->flock(filp, F_SETLKW, &flock);
50815+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
50816+ flock.fl_ops->fl_release_private(&flock);
50817 }
50818
50819 lock_kernel();
50820diff --git a/fs/mbcache.c b/fs/mbcache.c
50821index ec88ff3..b843a82 100644
50822--- a/fs/mbcache.c
50823+++ b/fs/mbcache.c
50824@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
50825 if (!cache)
50826 goto fail;
50827 cache->c_name = name;
50828- cache->c_op.free = NULL;
50829+ *(void **)&cache->c_op.free = NULL;
50830 if (cache_op)
50831- cache->c_op.free = cache_op->free;
50832+ *(void **)&cache->c_op.free = cache_op->free;
50833 atomic_set(&cache->c_entry_count, 0);
50834 cache->c_bucket_bits = bucket_bits;
50835 #ifdef MB_CACHE_INDEXES_COUNT
50836diff --git a/fs/namei.c b/fs/namei.c
50837index b0afbd4..8d065a1 100644
50838--- a/fs/namei.c
50839+++ b/fs/namei.c
50840@@ -224,14 +224,6 @@ int generic_permission(struct inode *inode, int mask,
50841 return ret;
50842
50843 /*
50844- * Read/write DACs are always overridable.
50845- * Executable DACs are overridable if at least one exec bit is set.
50846- */
50847- if (!(mask & MAY_EXEC) || execute_ok(inode))
50848- if (capable(CAP_DAC_OVERRIDE))
50849- return 0;
50850-
50851- /*
50852 * Searching includes executable on directories, else just read.
50853 */
50854 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
50855@@ -239,6 +231,14 @@ int generic_permission(struct inode *inode, int mask,
50856 if (capable(CAP_DAC_READ_SEARCH))
50857 return 0;
50858
50859+ /*
50860+ * Read/write DACs are always overridable.
50861+ * Executable DACs are overridable if at least one exec bit is set.
50862+ */
50863+ if (!(mask & MAY_EXEC) || execute_ok(inode))
50864+ if (capable(CAP_DAC_OVERRIDE))
50865+ return 0;
50866+
50867 return -EACCES;
50868 }
50869
50870@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
50871 if (!ret)
50872 goto ok;
50873
50874- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
50875+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
50876+ capable(CAP_DAC_OVERRIDE))
50877 goto ok;
50878
50879 return ret;
50880@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
50881 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
50882 error = PTR_ERR(cookie);
50883 if (!IS_ERR(cookie)) {
50884- char *s = nd_get_link(nd);
50885+ const char *s = nd_get_link(nd);
50886 error = 0;
50887 if (s)
50888 error = __vfs_follow_link(nd, s);
50889@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
50890 err = security_inode_follow_link(path->dentry, nd);
50891 if (err)
50892 goto loop;
50893+
50894+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
50895+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
50896+ err = -EACCES;
50897+ goto loop;
50898+ }
50899+
50900 current->link_count++;
50901 current->total_link_count++;
50902 nd->depth++;
50903@@ -1016,11 +1024,19 @@ return_reval:
50904 break;
50905 }
50906 return_base:
50907+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
50908+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
50909+ path_put(&nd->path);
50910+ return -ENOENT;
50911+ }
50912 return 0;
50913 out_dput:
50914 path_put_conditional(&next, nd);
50915 break;
50916 }
50917+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
50918+ err = -ENOENT;
50919+
50920 path_put(&nd->path);
50921 return_err:
50922 return err;
50923@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
50924 int retval = path_init(dfd, name, flags, nd);
50925 if (!retval)
50926 retval = path_walk(name, nd);
50927- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
50928- nd->path.dentry->d_inode))
50929- audit_inode(name, nd->path.dentry);
50930+
50931+ if (likely(!retval)) {
50932+ if (nd->path.dentry && nd->path.dentry->d_inode) {
50933+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
50934+ retval = -ENOENT;
50935+ if (!audit_dummy_context())
50936+ audit_inode(name, nd->path.dentry);
50937+ }
50938+ }
50939 if (nd->root.mnt) {
50940 path_put(&nd->root);
50941 nd->root.mnt = NULL;
50942 }
50943+
50944 return retval;
50945 }
50946
50947@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
50948 if (error)
50949 goto err_out;
50950
50951+
50952+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
50953+ error = -EPERM;
50954+ goto err_out;
50955+ }
50956+ if (gr_handle_rawio(inode)) {
50957+ error = -EPERM;
50958+ goto err_out;
50959+ }
50960+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
50961+ error = -EACCES;
50962+ goto err_out;
50963+ }
50964+
50965 if (flag & O_TRUNC) {
50966 error = get_write_access(inode);
50967 if (error)
50968@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
50969 {
50970 int error;
50971 struct dentry *dir = nd->path.dentry;
50972+ int acc_mode = ACC_MODE(flag);
50973+
50974+ if (flag & O_TRUNC)
50975+ acc_mode |= MAY_WRITE;
50976+ if (flag & O_APPEND)
50977+ acc_mode |= MAY_APPEND;
50978+
50979+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
50980+ error = -EACCES;
50981+ goto out_unlock;
50982+ }
50983
50984 if (!IS_POSIXACL(dir->d_inode))
50985 mode &= ~current_umask();
50986@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
50987 if (error)
50988 goto out_unlock;
50989 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
50990+ if (!error)
50991+ gr_handle_create(path->dentry, nd->path.mnt);
50992 out_unlock:
50993 mutex_unlock(&dir->d_inode->i_mutex);
50994 dput(nd->path.dentry);
50995@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
50996 &nd, flag);
50997 if (error)
50998 return ERR_PTR(error);
50999+
51000+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51001+ error = -EPERM;
51002+ goto exit;
51003+ }
51004+
51005+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51006+ error = -EPERM;
51007+ goto exit;
51008+ }
51009+
51010+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51011+ error = -EACCES;
51012+ goto exit;
51013+ }
51014+
51015 goto ok;
51016 }
51017
51018@@ -1795,6 +1861,19 @@ do_last:
51019 /*
51020 * It already exists.
51021 */
51022+
51023+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51024+ error = -ENOENT;
51025+ goto exit_mutex_unlock;
51026+ }
51027+
51028+ /* only check if O_CREAT is specified, all other checks need
51029+ to go into may_open */
51030+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51031+ error = -EACCES;
51032+ goto exit_mutex_unlock;
51033+ }
51034+
51035 mutex_unlock(&dir->d_inode->i_mutex);
51036 audit_inode(pathname, path.dentry);
51037
51038@@ -1887,6 +1966,13 @@ do_link:
51039 error = security_inode_follow_link(path.dentry, &nd);
51040 if (error)
51041 goto exit_dput;
51042+
51043+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51044+ path.dentry, nd.path.mnt)) {
51045+ error = -EACCES;
51046+ goto exit_dput;
51047+ }
51048+
51049 error = __do_follow_link(&path, &nd);
51050 if (error) {
51051 /* Does someone understand code flow here? Or it is only
51052@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51053 }
51054 return dentry;
51055 eexist:
51056+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51057+ dput(dentry);
51058+ return ERR_PTR(-ENOENT);
51059+ }
51060 dput(dentry);
51061 dentry = ERR_PTR(-EEXIST);
51062 fail:
51063@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51064 error = may_mknod(mode);
51065 if (error)
51066 goto out_dput;
51067+
51068+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51069+ error = -EPERM;
51070+ goto out_dput;
51071+ }
51072+
51073+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51074+ error = -EACCES;
51075+ goto out_dput;
51076+ }
51077+
51078 error = mnt_want_write(nd.path.mnt);
51079 if (error)
51080 goto out_dput;
51081@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51082 }
51083 out_drop_write:
51084 mnt_drop_write(nd.path.mnt);
51085+
51086+ if (!error)
51087+ gr_handle_create(dentry, nd.path.mnt);
51088 out_dput:
51089 dput(dentry);
51090 out_unlock:
51091@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51092 if (IS_ERR(dentry))
51093 goto out_unlock;
51094
51095+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51096+ error = -EACCES;
51097+ goto out_dput;
51098+ }
51099+
51100 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51101 mode &= ~current_umask();
51102 error = mnt_want_write(nd.path.mnt);
51103@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51104 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51105 out_drop_write:
51106 mnt_drop_write(nd.path.mnt);
51107+
51108+ if (!error)
51109+ gr_handle_create(dentry, nd.path.mnt);
51110+
51111 out_dput:
51112 dput(dentry);
51113 out_unlock:
51114@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51115 char * name;
51116 struct dentry *dentry;
51117 struct nameidata nd;
51118+ ino_t saved_ino = 0;
51119+ dev_t saved_dev = 0;
51120
51121 error = user_path_parent(dfd, pathname, &nd, &name);
51122 if (error)
51123@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51124 error = PTR_ERR(dentry);
51125 if (IS_ERR(dentry))
51126 goto exit2;
51127+
51128+ if (dentry->d_inode != NULL) {
51129+ saved_ino = dentry->d_inode->i_ino;
51130+ saved_dev = gr_get_dev_from_dentry(dentry);
51131+
51132+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51133+ error = -EACCES;
51134+ goto exit3;
51135+ }
51136+ }
51137+
51138 error = mnt_want_write(nd.path.mnt);
51139 if (error)
51140 goto exit3;
51141@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51142 if (error)
51143 goto exit4;
51144 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51145+ if (!error && (saved_dev || saved_ino))
51146+ gr_handle_delete(saved_ino, saved_dev);
51147 exit4:
51148 mnt_drop_write(nd.path.mnt);
51149 exit3:
51150@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51151 struct dentry *dentry;
51152 struct nameidata nd;
51153 struct inode *inode = NULL;
51154+ ino_t saved_ino = 0;
51155+ dev_t saved_dev = 0;
51156
51157 error = user_path_parent(dfd, pathname, &nd, &name);
51158 if (error)
51159@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51160 if (nd.last.name[nd.last.len])
51161 goto slashes;
51162 inode = dentry->d_inode;
51163- if (inode)
51164+ if (inode) {
51165+ if (inode->i_nlink <= 1) {
51166+ saved_ino = inode->i_ino;
51167+ saved_dev = gr_get_dev_from_dentry(dentry);
51168+ }
51169+
51170 atomic_inc(&inode->i_count);
51171+
51172+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51173+ error = -EACCES;
51174+ goto exit2;
51175+ }
51176+ }
51177 error = mnt_want_write(nd.path.mnt);
51178 if (error)
51179 goto exit2;
51180@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51181 if (error)
51182 goto exit3;
51183 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51184+ if (!error && (saved_ino || saved_dev))
51185+ gr_handle_delete(saved_ino, saved_dev);
51186 exit3:
51187 mnt_drop_write(nd.path.mnt);
51188 exit2:
51189@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51190 if (IS_ERR(dentry))
51191 goto out_unlock;
51192
51193+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51194+ error = -EACCES;
51195+ goto out_dput;
51196+ }
51197+
51198 error = mnt_want_write(nd.path.mnt);
51199 if (error)
51200 goto out_dput;
51201@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51202 if (error)
51203 goto out_drop_write;
51204 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51205+ if (!error)
51206+ gr_handle_create(dentry, nd.path.mnt);
51207 out_drop_write:
51208 mnt_drop_write(nd.path.mnt);
51209 out_dput:
51210@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51211 error = PTR_ERR(new_dentry);
51212 if (IS_ERR(new_dentry))
51213 goto out_unlock;
51214+
51215+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51216+ old_path.dentry->d_inode,
51217+ old_path.dentry->d_inode->i_mode, to)) {
51218+ error = -EACCES;
51219+ goto out_dput;
51220+ }
51221+
51222+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51223+ old_path.dentry, old_path.mnt, to)) {
51224+ error = -EACCES;
51225+ goto out_dput;
51226+ }
51227+
51228 error = mnt_want_write(nd.path.mnt);
51229 if (error)
51230 goto out_dput;
51231@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51232 if (error)
51233 goto out_drop_write;
51234 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51235+ if (!error)
51236+ gr_handle_create(new_dentry, nd.path.mnt);
51237 out_drop_write:
51238 mnt_drop_write(nd.path.mnt);
51239 out_dput:
51240@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51241 char *to;
51242 int error;
51243
51244+ pax_track_stack();
51245+
51246 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51247 if (error)
51248 goto exit;
51249@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51250 if (new_dentry == trap)
51251 goto exit5;
51252
51253+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51254+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
51255+ to);
51256+ if (error)
51257+ goto exit5;
51258+
51259 error = mnt_want_write(oldnd.path.mnt);
51260 if (error)
51261 goto exit5;
51262@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51263 goto exit6;
51264 error = vfs_rename(old_dir->d_inode, old_dentry,
51265 new_dir->d_inode, new_dentry);
51266+ if (!error)
51267+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51268+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51269 exit6:
51270 mnt_drop_write(oldnd.path.mnt);
51271 exit5:
51272@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51273
51274 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51275 {
51276+ char tmpbuf[64];
51277+ const char *newlink;
51278 int len;
51279
51280 len = PTR_ERR(link);
51281@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51282 len = strlen(link);
51283 if (len > (unsigned) buflen)
51284 len = buflen;
51285- if (copy_to_user(buffer, link, len))
51286+
51287+ if (len < sizeof(tmpbuf)) {
51288+ memcpy(tmpbuf, link, len);
51289+ newlink = tmpbuf;
51290+ } else
51291+ newlink = link;
51292+
51293+ if (copy_to_user(buffer, newlink, len))
51294 len = -EFAULT;
51295 out:
51296 return len;
51297diff --git a/fs/namespace.c b/fs/namespace.c
51298index 2beb0fb..11a95a5 100644
51299--- a/fs/namespace.c
51300+++ b/fs/namespace.c
51301@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51302 if (!(sb->s_flags & MS_RDONLY))
51303 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
51304 up_write(&sb->s_umount);
51305+
51306+ gr_log_remount(mnt->mnt_devname, retval);
51307+
51308 return retval;
51309 }
51310
51311@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51312 security_sb_umount_busy(mnt);
51313 up_write(&namespace_sem);
51314 release_mounts(&umount_list);
51315+
51316+ gr_log_unmount(mnt->mnt_devname, retval);
51317+
51318 return retval;
51319 }
51320
51321@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51322 if (retval)
51323 goto dput_out;
51324
51325+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
51326+ retval = -EPERM;
51327+ goto dput_out;
51328+ }
51329+
51330+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
51331+ retval = -EPERM;
51332+ goto dput_out;
51333+ }
51334+
51335 if (flags & MS_REMOUNT)
51336 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
51337 data_page);
51338@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51339 dev_name, data_page);
51340 dput_out:
51341 path_put(&path);
51342+
51343+ gr_log_mount(dev_name, dir_name, retval);
51344+
51345 return retval;
51346 }
51347
51348@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
51349 goto out1;
51350 }
51351
51352+ if (gr_handle_chroot_pivot()) {
51353+ error = -EPERM;
51354+ path_put(&old);
51355+ goto out1;
51356+ }
51357+
51358 read_lock(&current->fs->lock);
51359 root = current->fs->root;
51360 path_get(&current->fs->root);
51361diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
51362index b8b5b30..2bd9ccb 100644
51363--- a/fs/ncpfs/dir.c
51364+++ b/fs/ncpfs/dir.c
51365@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
51366 int res, val = 0, len;
51367 __u8 __name[NCP_MAXPATHLEN + 1];
51368
51369+ pax_track_stack();
51370+
51371 parent = dget_parent(dentry);
51372 dir = parent->d_inode;
51373
51374@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
51375 int error, res, len;
51376 __u8 __name[NCP_MAXPATHLEN + 1];
51377
51378+ pax_track_stack();
51379+
51380 lock_kernel();
51381 error = -EIO;
51382 if (!ncp_conn_valid(server))
51383@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
51384 int error, result, len;
51385 int opmode;
51386 __u8 __name[NCP_MAXPATHLEN + 1];
51387-
51388+
51389 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
51390 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
51391
51392+ pax_track_stack();
51393+
51394 error = -EIO;
51395 lock_kernel();
51396 if (!ncp_conn_valid(server))
51397@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51398 int error, len;
51399 __u8 __name[NCP_MAXPATHLEN + 1];
51400
51401+ pax_track_stack();
51402+
51403 DPRINTK("ncp_mkdir: making %s/%s\n",
51404 dentry->d_parent->d_name.name, dentry->d_name.name);
51405
51406@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51407 if (!ncp_conn_valid(server))
51408 goto out;
51409
51410+ pax_track_stack();
51411+
51412 ncp_age_dentry(server, dentry);
51413 len = sizeof(__name);
51414 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
51415@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
51416 int old_len, new_len;
51417 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
51418
51419+ pax_track_stack();
51420+
51421 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
51422 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
51423 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
51424diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
51425index cf98da1..da890a9 100644
51426--- a/fs/ncpfs/inode.c
51427+++ b/fs/ncpfs/inode.c
51428@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
51429 #endif
51430 struct ncp_entry_info finfo;
51431
51432+ pax_track_stack();
51433+
51434 data.wdog_pid = NULL;
51435 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
51436 if (!server)
51437diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
51438index bfaef7b..e9d03ca 100644
51439--- a/fs/nfs/inode.c
51440+++ b/fs/nfs/inode.c
51441@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
51442 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
51443 nfsi->attrtimeo_timestamp = jiffies;
51444
51445- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
51446+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
51447 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
51448 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
51449 else
51450@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
51451 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
51452 }
51453
51454-static atomic_long_t nfs_attr_generation_counter;
51455+static atomic_long_unchecked_t nfs_attr_generation_counter;
51456
51457 static unsigned long nfs_read_attr_generation_counter(void)
51458 {
51459- return atomic_long_read(&nfs_attr_generation_counter);
51460+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
51461 }
51462
51463 unsigned long nfs_inc_attr_generation_counter(void)
51464 {
51465- return atomic_long_inc_return(&nfs_attr_generation_counter);
51466+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
51467 }
51468
51469 void nfs_fattr_init(struct nfs_fattr *fattr)
51470diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
51471index cc2f505..f6a236f 100644
51472--- a/fs/nfsd/lockd.c
51473+++ b/fs/nfsd/lockd.c
51474@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
51475 fput(filp);
51476 }
51477
51478-static struct nlmsvc_binding nfsd_nlm_ops = {
51479+static const struct nlmsvc_binding nfsd_nlm_ops = {
51480 .fopen = nlm_fopen, /* open file for locking */
51481 .fclose = nlm_fclose, /* close file */
51482 };
51483diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
51484index cfc3391..dcc083a 100644
51485--- a/fs/nfsd/nfs4state.c
51486+++ b/fs/nfsd/nfs4state.c
51487@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
51488 unsigned int cmd;
51489 int err;
51490
51491+ pax_track_stack();
51492+
51493 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
51494 (long long) lock->lk_offset,
51495 (long long) lock->lk_length);
51496diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
51497index 4a82a96..0d5fb49 100644
51498--- a/fs/nfsd/nfs4xdr.c
51499+++ b/fs/nfsd/nfs4xdr.c
51500@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
51501 struct nfsd4_compoundres *resp = rqstp->rq_resp;
51502 u32 minorversion = resp->cstate.minorversion;
51503
51504+ pax_track_stack();
51505+
51506 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
51507 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
51508 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
51509diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
51510index 2e09588..596421d 100644
51511--- a/fs/nfsd/vfs.c
51512+++ b/fs/nfsd/vfs.c
51513@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51514 } else {
51515 oldfs = get_fs();
51516 set_fs(KERNEL_DS);
51517- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
51518+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
51519 set_fs(oldfs);
51520 }
51521
51522@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51523
51524 /* Write the data. */
51525 oldfs = get_fs(); set_fs(KERNEL_DS);
51526- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
51527+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
51528 set_fs(oldfs);
51529 if (host_err < 0)
51530 goto out_nfserr;
51531@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
51532 */
51533
51534 oldfs = get_fs(); set_fs(KERNEL_DS);
51535- host_err = inode->i_op->readlink(dentry, buf, *lenp);
51536+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
51537 set_fs(oldfs);
51538
51539 if (host_err < 0)
51540diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
51541index f6af760..d6b2b83 100644
51542--- a/fs/nilfs2/ioctl.c
51543+++ b/fs/nilfs2/ioctl.c
51544@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
51545 unsigned int cmd, void __user *argp)
51546 {
51547 struct nilfs_argv argv[5];
51548- const static size_t argsz[5] = {
51549+ static const size_t argsz[5] = {
51550 sizeof(struct nilfs_vdesc),
51551 sizeof(struct nilfs_period),
51552 sizeof(__u64),
51553diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
51554index 7e54e52..9337248 100644
51555--- a/fs/notify/dnotify/dnotify.c
51556+++ b/fs/notify/dnotify/dnotify.c
51557@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
51558 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
51559 }
51560
51561-static struct fsnotify_ops dnotify_fsnotify_ops = {
51562+static const struct fsnotify_ops dnotify_fsnotify_ops = {
51563 .handle_event = dnotify_handle_event,
51564 .should_send_event = dnotify_should_send_event,
51565 .free_group_priv = NULL,
51566diff --git a/fs/notify/notification.c b/fs/notify/notification.c
51567index b8bf53b..c518688 100644
51568--- a/fs/notify/notification.c
51569+++ b/fs/notify/notification.c
51570@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
51571 * get set to 0 so it will never get 'freed'
51572 */
51573 static struct fsnotify_event q_overflow_event;
51574-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51575+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51576
51577 /**
51578 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
51579@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51580 */
51581 u32 fsnotify_get_cookie(void)
51582 {
51583- return atomic_inc_return(&fsnotify_sync_cookie);
51584+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
51585 }
51586 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
51587
51588diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
51589index 5a9e344..0f8cd28 100644
51590--- a/fs/ntfs/dir.c
51591+++ b/fs/ntfs/dir.c
51592@@ -1328,7 +1328,7 @@ find_next_index_buffer:
51593 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
51594 ~(s64)(ndir->itype.index.block_size - 1)));
51595 /* Bounds checks. */
51596- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51597+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51598 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
51599 "inode 0x%lx or driver bug.", vdir->i_ino);
51600 goto err_out;
51601diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
51602index 663c0e3..b6868e9 100644
51603--- a/fs/ntfs/file.c
51604+++ b/fs/ntfs/file.c
51605@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
51606 #endif /* NTFS_RW */
51607 };
51608
51609-const struct file_operations ntfs_empty_file_ops = {};
51610+const struct file_operations ntfs_empty_file_ops __read_only;
51611
51612-const struct inode_operations ntfs_empty_inode_ops = {};
51613+const struct inode_operations ntfs_empty_inode_ops __read_only;
51614diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
51615index 1cd2934..880b5d2 100644
51616--- a/fs/ocfs2/cluster/masklog.c
51617+++ b/fs/ocfs2/cluster/masklog.c
51618@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
51619 return mlog_mask_store(mlog_attr->mask, buf, count);
51620 }
51621
51622-static struct sysfs_ops mlog_attr_ops = {
51623+static const struct sysfs_ops mlog_attr_ops = {
51624 .show = mlog_show,
51625 .store = mlog_store,
51626 };
51627diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
51628index ac10f83..2cd2607 100644
51629--- a/fs/ocfs2/localalloc.c
51630+++ b/fs/ocfs2/localalloc.c
51631@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
51632 goto bail;
51633 }
51634
51635- atomic_inc(&osb->alloc_stats.moves);
51636+ atomic_inc_unchecked(&osb->alloc_stats.moves);
51637
51638 status = 0;
51639 bail:
51640diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
51641index f010b22..9f9ed34 100644
51642--- a/fs/ocfs2/namei.c
51643+++ b/fs/ocfs2/namei.c
51644@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
51645 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
51646 struct ocfs2_dir_lookup_result target_insert = { NULL, };
51647
51648+ pax_track_stack();
51649+
51650 /* At some point it might be nice to break this function up a
51651 * bit. */
51652
51653diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
51654index d963d86..914cfbd 100644
51655--- a/fs/ocfs2/ocfs2.h
51656+++ b/fs/ocfs2/ocfs2.h
51657@@ -217,11 +217,11 @@ enum ocfs2_vol_state
51658
51659 struct ocfs2_alloc_stats
51660 {
51661- atomic_t moves;
51662- atomic_t local_data;
51663- atomic_t bitmap_data;
51664- atomic_t bg_allocs;
51665- atomic_t bg_extends;
51666+ atomic_unchecked_t moves;
51667+ atomic_unchecked_t local_data;
51668+ atomic_unchecked_t bitmap_data;
51669+ atomic_unchecked_t bg_allocs;
51670+ atomic_unchecked_t bg_extends;
51671 };
51672
51673 enum ocfs2_local_alloc_state
51674diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
51675index 79b5dac..d322952 100644
51676--- a/fs/ocfs2/suballoc.c
51677+++ b/fs/ocfs2/suballoc.c
51678@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
51679 mlog_errno(status);
51680 goto bail;
51681 }
51682- atomic_inc(&osb->alloc_stats.bg_extends);
51683+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
51684
51685 /* You should never ask for this much metadata */
51686 BUG_ON(bits_wanted >
51687@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
51688 mlog_errno(status);
51689 goto bail;
51690 }
51691- atomic_inc(&osb->alloc_stats.bg_allocs);
51692+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
51693
51694 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
51695 ac->ac_bits_given += (*num_bits);
51696@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
51697 mlog_errno(status);
51698 goto bail;
51699 }
51700- atomic_inc(&osb->alloc_stats.bg_allocs);
51701+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
51702
51703 BUG_ON(num_bits != 1);
51704
51705@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
51706 cluster_start,
51707 num_clusters);
51708 if (!status)
51709- atomic_inc(&osb->alloc_stats.local_data);
51710+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
51711 } else {
51712 if (min_clusters > (osb->bitmap_cpg - 1)) {
51713 /* The only paths asking for contiguousness
51714@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
51715 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
51716 bg_blkno,
51717 bg_bit_off);
51718- atomic_inc(&osb->alloc_stats.bitmap_data);
51719+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
51720 }
51721 }
51722 if (status < 0) {
51723diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
51724index 9f55be4..a3f8048 100644
51725--- a/fs/ocfs2/super.c
51726+++ b/fs/ocfs2/super.c
51727@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
51728 "%10s => GlobalAllocs: %d LocalAllocs: %d "
51729 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
51730 "Stats",
51731- atomic_read(&osb->alloc_stats.bitmap_data),
51732- atomic_read(&osb->alloc_stats.local_data),
51733- atomic_read(&osb->alloc_stats.bg_allocs),
51734- atomic_read(&osb->alloc_stats.moves),
51735- atomic_read(&osb->alloc_stats.bg_extends));
51736+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
51737+ atomic_read_unchecked(&osb->alloc_stats.local_data),
51738+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
51739+ atomic_read_unchecked(&osb->alloc_stats.moves),
51740+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
51741
51742 out += snprintf(buf + out, len - out,
51743 "%10s => State: %u Descriptor: %llu Size: %u bits "
51744@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
51745 spin_lock_init(&osb->osb_xattr_lock);
51746 ocfs2_init_inode_steal_slot(osb);
51747
51748- atomic_set(&osb->alloc_stats.moves, 0);
51749- atomic_set(&osb->alloc_stats.local_data, 0);
51750- atomic_set(&osb->alloc_stats.bitmap_data, 0);
51751- atomic_set(&osb->alloc_stats.bg_allocs, 0);
51752- atomic_set(&osb->alloc_stats.bg_extends, 0);
51753+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
51754+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
51755+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
51756+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
51757+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
51758
51759 /* Copy the blockcheck stats from the superblock probe */
51760 osb->osb_ecc_stats = *stats;
51761diff --git a/fs/open.c b/fs/open.c
51762index 4f01e06..091f6c3 100644
51763--- a/fs/open.c
51764+++ b/fs/open.c
51765@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
51766 error = locks_verify_truncate(inode, NULL, length);
51767 if (!error)
51768 error = security_path_truncate(&path, length, 0);
51769+
51770+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
51771+ error = -EACCES;
51772+
51773 if (!error) {
51774 vfs_dq_init(inode);
51775 error = do_truncate(path.dentry, length, 0, NULL);
51776@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
51777 if (__mnt_is_readonly(path.mnt))
51778 res = -EROFS;
51779
51780+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
51781+ res = -EACCES;
51782+
51783 out_path_release:
51784 path_put(&path);
51785 out:
51786@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
51787 if (error)
51788 goto dput_and_out;
51789
51790+ gr_log_chdir(path.dentry, path.mnt);
51791+
51792 set_fs_pwd(current->fs, &path);
51793
51794 dput_and_out:
51795@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
51796 goto out_putf;
51797
51798 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
51799+
51800+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
51801+ error = -EPERM;
51802+
51803+ if (!error)
51804+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
51805+
51806 if (!error)
51807 set_fs_pwd(current->fs, &file->f_path);
51808 out_putf:
51809@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
51810 if (!capable(CAP_SYS_CHROOT))
51811 goto dput_and_out;
51812
51813+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
51814+ goto dput_and_out;
51815+
51816 set_fs_root(current->fs, &path);
51817+
51818+ gr_handle_chroot_chdir(&path);
51819+
51820 error = 0;
51821 dput_and_out:
51822 path_put(&path);
51823@@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
51824 err = mnt_want_write_file(file);
51825 if (err)
51826 goto out_putf;
51827+
51828 mutex_lock(&inode->i_mutex);
51829+
51830+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
51831+ err = -EACCES;
51832+ goto out_unlock;
51833+ }
51834+
51835 if (mode == (mode_t) -1)
51836 mode = inode->i_mode;
51837+
51838+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
51839+ err = -EPERM;
51840+ goto out_unlock;
51841+ }
51842+
51843 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
51844 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
51845 err = notify_change(dentry, &newattrs);
51846+
51847+out_unlock:
51848 mutex_unlock(&inode->i_mutex);
51849 mnt_drop_write(file->f_path.mnt);
51850 out_putf:
51851@@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
51852 error = mnt_want_write(path.mnt);
51853 if (error)
51854 goto dput_and_out;
51855+
51856 mutex_lock(&inode->i_mutex);
51857+
51858+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
51859+ error = -EACCES;
51860+ goto out_unlock;
51861+ }
51862+
51863 if (mode == (mode_t) -1)
51864 mode = inode->i_mode;
51865+
51866+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
51867+ error = -EACCES;
51868+ goto out_unlock;
51869+ }
51870+
51871 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
51872 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
51873 error = notify_change(path.dentry, &newattrs);
51874+
51875+out_unlock:
51876 mutex_unlock(&inode->i_mutex);
51877 mnt_drop_write(path.mnt);
51878 dput_and_out:
51879@@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
51880 return sys_fchmodat(AT_FDCWD, filename, mode);
51881 }
51882
51883-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
51884+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
51885 {
51886 struct inode *inode = dentry->d_inode;
51887 int error;
51888 struct iattr newattrs;
51889
51890+ if (!gr_acl_handle_chown(dentry, mnt))
51891+ return -EACCES;
51892+
51893 newattrs.ia_valid = ATTR_CTIME;
51894 if (user != (uid_t) -1) {
51895 newattrs.ia_valid |= ATTR_UID;
51896@@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
51897 error = mnt_want_write(path.mnt);
51898 if (error)
51899 goto out_release;
51900- error = chown_common(path.dentry, user, group);
51901+ error = chown_common(path.dentry, user, group, path.mnt);
51902 mnt_drop_write(path.mnt);
51903 out_release:
51904 path_put(&path);
51905@@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
51906 error = mnt_want_write(path.mnt);
51907 if (error)
51908 goto out_release;
51909- error = chown_common(path.dentry, user, group);
51910+ error = chown_common(path.dentry, user, group, path.mnt);
51911 mnt_drop_write(path.mnt);
51912 out_release:
51913 path_put(&path);
51914@@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
51915 error = mnt_want_write(path.mnt);
51916 if (error)
51917 goto out_release;
51918- error = chown_common(path.dentry, user, group);
51919+ error = chown_common(path.dentry, user, group, path.mnt);
51920 mnt_drop_write(path.mnt);
51921 out_release:
51922 path_put(&path);
51923@@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
51924 goto out_fput;
51925 dentry = file->f_path.dentry;
51926 audit_inode(NULL, dentry);
51927- error = chown_common(dentry, user, group);
51928+ error = chown_common(dentry, user, group, file->f_path.mnt);
51929 mnt_drop_write(file->f_path.mnt);
51930 out_fput:
51931 fput(file);
51932@@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
51933 if (!IS_ERR(tmp)) {
51934 fd = get_unused_fd_flags(flags);
51935 if (fd >= 0) {
51936- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
51937+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
51938 if (IS_ERR(f)) {
51939 put_unused_fd(fd);
51940 fd = PTR_ERR(f);
51941diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
51942index dd6efdb..3babc6c 100644
51943--- a/fs/partitions/ldm.c
51944+++ b/fs/partitions/ldm.c
51945@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
51946 ldm_error ("A VBLK claims to have %d parts.", num);
51947 return false;
51948 }
51949+
51950 if (rec >= num) {
51951 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
51952 return false;
51953@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
51954 goto found;
51955 }
51956
51957- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
51958+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
51959 if (!f) {
51960 ldm_crit ("Out of memory.");
51961 return false;
51962diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
51963index 5765198..7f8e9e0 100644
51964--- a/fs/partitions/mac.c
51965+++ b/fs/partitions/mac.c
51966@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
51967 return 0; /* not a MacOS disk */
51968 }
51969 blocks_in_map = be32_to_cpu(part->map_count);
51970+ printk(" [mac]");
51971 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
51972 put_dev_sector(sect);
51973 return 0;
51974 }
51975- printk(" [mac]");
51976 for (slot = 1; slot <= blocks_in_map; ++slot) {
51977 int pos = slot * secsize;
51978 put_dev_sector(sect);
51979diff --git a/fs/pipe.c b/fs/pipe.c
51980index d0cc080..8a6f211 100644
51981--- a/fs/pipe.c
51982+++ b/fs/pipe.c
51983@@ -401,9 +401,9 @@ redo:
51984 }
51985 if (bufs) /* More to do? */
51986 continue;
51987- if (!pipe->writers)
51988+ if (!atomic_read(&pipe->writers))
51989 break;
51990- if (!pipe->waiting_writers) {
51991+ if (!atomic_read(&pipe->waiting_writers)) {
51992 /* syscall merging: Usually we must not sleep
51993 * if O_NONBLOCK is set, or if we got some data.
51994 * But if a writer sleeps in kernel space, then
51995@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
51996 mutex_lock(&inode->i_mutex);
51997 pipe = inode->i_pipe;
51998
51999- if (!pipe->readers) {
52000+ if (!atomic_read(&pipe->readers)) {
52001 send_sig(SIGPIPE, current, 0);
52002 ret = -EPIPE;
52003 goto out;
52004@@ -511,7 +511,7 @@ redo1:
52005 for (;;) {
52006 int bufs;
52007
52008- if (!pipe->readers) {
52009+ if (!atomic_read(&pipe->readers)) {
52010 send_sig(SIGPIPE, current, 0);
52011 if (!ret)
52012 ret = -EPIPE;
52013@@ -597,9 +597,9 @@ redo2:
52014 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52015 do_wakeup = 0;
52016 }
52017- pipe->waiting_writers++;
52018+ atomic_inc(&pipe->waiting_writers);
52019 pipe_wait(pipe);
52020- pipe->waiting_writers--;
52021+ atomic_dec(&pipe->waiting_writers);
52022 }
52023 out:
52024 mutex_unlock(&inode->i_mutex);
52025@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52026 mask = 0;
52027 if (filp->f_mode & FMODE_READ) {
52028 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52029- if (!pipe->writers && filp->f_version != pipe->w_counter)
52030+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52031 mask |= POLLHUP;
52032 }
52033
52034@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52035 * Most Unices do not set POLLERR for FIFOs but on Linux they
52036 * behave exactly like pipes for poll().
52037 */
52038- if (!pipe->readers)
52039+ if (!atomic_read(&pipe->readers))
52040 mask |= POLLERR;
52041 }
52042
52043@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52044
52045 mutex_lock(&inode->i_mutex);
52046 pipe = inode->i_pipe;
52047- pipe->readers -= decr;
52048- pipe->writers -= decw;
52049+ atomic_sub(decr, &pipe->readers);
52050+ atomic_sub(decw, &pipe->writers);
52051
52052- if (!pipe->readers && !pipe->writers) {
52053+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52054 free_pipe_info(inode);
52055 } else {
52056 wake_up_interruptible_sync(&pipe->wait);
52057@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52058
52059 if (inode->i_pipe) {
52060 ret = 0;
52061- inode->i_pipe->readers++;
52062+ atomic_inc(&inode->i_pipe->readers);
52063 }
52064
52065 mutex_unlock(&inode->i_mutex);
52066@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52067
52068 if (inode->i_pipe) {
52069 ret = 0;
52070- inode->i_pipe->writers++;
52071+ atomic_inc(&inode->i_pipe->writers);
52072 }
52073
52074 mutex_unlock(&inode->i_mutex);
52075@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52076 if (inode->i_pipe) {
52077 ret = 0;
52078 if (filp->f_mode & FMODE_READ)
52079- inode->i_pipe->readers++;
52080+ atomic_inc(&inode->i_pipe->readers);
52081 if (filp->f_mode & FMODE_WRITE)
52082- inode->i_pipe->writers++;
52083+ atomic_inc(&inode->i_pipe->writers);
52084 }
52085
52086 mutex_unlock(&inode->i_mutex);
52087@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52088 inode->i_pipe = NULL;
52089 }
52090
52091-static struct vfsmount *pipe_mnt __read_mostly;
52092+struct vfsmount *pipe_mnt __read_mostly;
52093 static int pipefs_delete_dentry(struct dentry *dentry)
52094 {
52095 /*
52096@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52097 goto fail_iput;
52098 inode->i_pipe = pipe;
52099
52100- pipe->readers = pipe->writers = 1;
52101+ atomic_set(&pipe->readers, 1);
52102+ atomic_set(&pipe->writers, 1);
52103 inode->i_fop = &rdwr_pipefifo_fops;
52104
52105 /*
52106diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52107index 50f8f06..c5755df 100644
52108--- a/fs/proc/Kconfig
52109+++ b/fs/proc/Kconfig
52110@@ -30,12 +30,12 @@ config PROC_FS
52111
52112 config PROC_KCORE
52113 bool "/proc/kcore support" if !ARM
52114- depends on PROC_FS && MMU
52115+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52116
52117 config PROC_VMCORE
52118 bool "/proc/vmcore support (EXPERIMENTAL)"
52119- depends on PROC_FS && CRASH_DUMP
52120- default y
52121+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52122+ default n
52123 help
52124 Exports the dump image of crashed kernel in ELF format.
52125
52126@@ -59,8 +59,8 @@ config PROC_SYSCTL
52127 limited in memory.
52128
52129 config PROC_PAGE_MONITOR
52130- default y
52131- depends on PROC_FS && MMU
52132+ default n
52133+ depends on PROC_FS && MMU && !GRKERNSEC
52134 bool "Enable /proc page monitoring" if EMBEDDED
52135 help
52136 Various /proc files exist to monitor process memory utilization:
52137diff --git a/fs/proc/array.c b/fs/proc/array.c
52138index c5ef152..1363194 100644
52139--- a/fs/proc/array.c
52140+++ b/fs/proc/array.c
52141@@ -60,6 +60,7 @@
52142 #include <linux/tty.h>
52143 #include <linux/string.h>
52144 #include <linux/mman.h>
52145+#include <linux/grsecurity.h>
52146 #include <linux/proc_fs.h>
52147 #include <linux/ioport.h>
52148 #include <linux/uaccess.h>
52149@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52150 p->nivcsw);
52151 }
52152
52153+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52154+static inline void task_pax(struct seq_file *m, struct task_struct *p)
52155+{
52156+ if (p->mm)
52157+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52158+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52159+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52160+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52161+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52162+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52163+ else
52164+ seq_printf(m, "PaX:\t-----\n");
52165+}
52166+#endif
52167+
52168 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52169 struct pid *pid, struct task_struct *task)
52170 {
52171@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52172 task_cap(m, task);
52173 cpuset_task_status_allowed(m, task);
52174 task_context_switch_counts(m, task);
52175+
52176+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52177+ task_pax(m, task);
52178+#endif
52179+
52180+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52181+ task_grsec_rbac(m, task);
52182+#endif
52183+
52184 return 0;
52185 }
52186
52187+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52188+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52189+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52190+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52191+#endif
52192+
52193 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52194 struct pid *pid, struct task_struct *task, int whole)
52195 {
52196@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52197 cputime_t cutime, cstime, utime, stime;
52198 cputime_t cgtime, gtime;
52199 unsigned long rsslim = 0;
52200- char tcomm[sizeof(task->comm)];
52201+ char tcomm[sizeof(task->comm)] = { 0 };
52202 unsigned long flags;
52203
52204+ pax_track_stack();
52205+
52206 state = *get_task_state(task);
52207 vsize = eip = esp = 0;
52208 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52209@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52210 gtime = task_gtime(task);
52211 }
52212
52213+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52214+ if (PAX_RAND_FLAGS(mm)) {
52215+ eip = 0;
52216+ esp = 0;
52217+ wchan = 0;
52218+ }
52219+#endif
52220+#ifdef CONFIG_GRKERNSEC_HIDESYM
52221+ wchan = 0;
52222+ eip =0;
52223+ esp =0;
52224+#endif
52225+
52226 /* scale priority and nice values from timeslices to -20..20 */
52227 /* to make it look like a "normal" Unix priority/nice value */
52228 priority = task_prio(task);
52229@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52230 vsize,
52231 mm ? get_mm_rss(mm) : 0,
52232 rsslim,
52233+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52234+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52235+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52236+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52237+#else
52238 mm ? (permitted ? mm->start_code : 1) : 0,
52239 mm ? (permitted ? mm->end_code : 1) : 0,
52240 (permitted && mm) ? mm->start_stack : 0,
52241+#endif
52242 esp,
52243 eip,
52244 /* The signal information here is obsolete.
52245@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52246
52247 return 0;
52248 }
52249+
52250+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52251+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
52252+{
52253+ u32 curr_ip = 0;
52254+ unsigned long flags;
52255+
52256+ if (lock_task_sighand(task, &flags)) {
52257+ curr_ip = task->signal->curr_ip;
52258+ unlock_task_sighand(task, &flags);
52259+ }
52260+
52261+ return sprintf(buffer, "%pI4\n", &curr_ip);
52262+}
52263+#endif
52264diff --git a/fs/proc/base.c b/fs/proc/base.c
52265index 67f7dc0..7171c9a 100644
52266--- a/fs/proc/base.c
52267+++ b/fs/proc/base.c
52268@@ -102,6 +102,22 @@ struct pid_entry {
52269 union proc_op op;
52270 };
52271
52272+struct getdents_callback {
52273+ struct linux_dirent __user * current_dir;
52274+ struct linux_dirent __user * previous;
52275+ struct file * file;
52276+ int count;
52277+ int error;
52278+};
52279+
52280+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
52281+ loff_t offset, u64 ino, unsigned int d_type)
52282+{
52283+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
52284+ buf->error = -EINVAL;
52285+ return 0;
52286+}
52287+
52288 #define NOD(NAME, MODE, IOP, FOP, OP) { \
52289 .name = (NAME), \
52290 .len = sizeof(NAME) - 1, \
52291@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
52292 if (task == current)
52293 return 0;
52294
52295+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
52296+ return -EPERM;
52297+
52298 /*
52299 * If current is actively ptrace'ing, and would also be
52300 * permitted to freshly attach with ptrace now, permit it.
52301@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
52302 if (!mm->arg_end)
52303 goto out_mm; /* Shh! No looking before we're done */
52304
52305+ if (gr_acl_handle_procpidmem(task))
52306+ goto out_mm;
52307+
52308 len = mm->arg_end - mm->arg_start;
52309
52310 if (len > PAGE_SIZE)
52311@@ -287,12 +309,28 @@ out:
52312 return res;
52313 }
52314
52315+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52316+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52317+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52318+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52319+#endif
52320+
52321 static int proc_pid_auxv(struct task_struct *task, char *buffer)
52322 {
52323 int res = 0;
52324 struct mm_struct *mm = get_task_mm(task);
52325 if (mm) {
52326 unsigned int nwords = 0;
52327+
52328+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52329+ /* allow if we're currently ptracing this task */
52330+ if (PAX_RAND_FLAGS(mm) &&
52331+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
52332+ mmput(mm);
52333+ return 0;
52334+ }
52335+#endif
52336+
52337 do {
52338 nwords += 2;
52339 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
52340@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
52341 }
52342
52343
52344-#ifdef CONFIG_KALLSYMS
52345+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52346 /*
52347 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
52348 * Returns the resolved symbol. If that fails, simply return the address.
52349@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
52350 mutex_unlock(&task->cred_guard_mutex);
52351 }
52352
52353-#ifdef CONFIG_STACKTRACE
52354+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52355
52356 #define MAX_STACK_TRACE_DEPTH 64
52357
52358@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
52359 return count;
52360 }
52361
52362-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52363+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52364 static int proc_pid_syscall(struct task_struct *task, char *buffer)
52365 {
52366 long nr;
52367@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
52368 /************************************************************************/
52369
52370 /* permission checks */
52371-static int proc_fd_access_allowed(struct inode *inode)
52372+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
52373 {
52374 struct task_struct *task;
52375 int allowed = 0;
52376@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
52377 */
52378 task = get_proc_task(inode);
52379 if (task) {
52380- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52381+ if (log)
52382+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
52383+ else
52384+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52385 put_task_struct(task);
52386 }
52387 return allowed;
52388@@ -963,6 +1004,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
52389 if (!task)
52390 goto out_no_task;
52391
52392+ if (gr_acl_handle_procpidmem(task))
52393+ goto out;
52394+
52395 if (!ptrace_may_access(task, PTRACE_MODE_READ))
52396 goto out;
52397
52398@@ -1377,7 +1421,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
52399 path_put(&nd->path);
52400
52401 /* Are we allowed to snoop on the tasks file descriptors? */
52402- if (!proc_fd_access_allowed(inode))
52403+ if (!proc_fd_access_allowed(inode,0))
52404 goto out;
52405
52406 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
52407@@ -1417,8 +1461,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
52408 struct path path;
52409
52410 /* Are we allowed to snoop on the tasks file descriptors? */
52411- if (!proc_fd_access_allowed(inode))
52412- goto out;
52413+ /* logging this is needed for learning on chromium to work properly,
52414+ but we don't want to flood the logs from 'ps' which does a readlink
52415+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
52416+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
52417+ */
52418+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
52419+ if (!proc_fd_access_allowed(inode,0))
52420+ goto out;
52421+ } else {
52422+ if (!proc_fd_access_allowed(inode,1))
52423+ goto out;
52424+ }
52425
52426 error = PROC_I(inode)->op.proc_get_link(inode, &path);
52427 if (error)
52428@@ -1483,7 +1537,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
52429 rcu_read_lock();
52430 cred = __task_cred(task);
52431 inode->i_uid = cred->euid;
52432+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52433+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52434+#else
52435 inode->i_gid = cred->egid;
52436+#endif
52437 rcu_read_unlock();
52438 }
52439 security_task_to_inode(task, inode);
52440@@ -1501,6 +1559,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52441 struct inode *inode = dentry->d_inode;
52442 struct task_struct *task;
52443 const struct cred *cred;
52444+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52445+ const struct cred *tmpcred = current_cred();
52446+#endif
52447
52448 generic_fillattr(inode, stat);
52449
52450@@ -1508,13 +1569,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52451 stat->uid = 0;
52452 stat->gid = 0;
52453 task = pid_task(proc_pid(inode), PIDTYPE_PID);
52454+
52455+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
52456+ rcu_read_unlock();
52457+ return -ENOENT;
52458+ }
52459+
52460 if (task) {
52461+ cred = __task_cred(task);
52462+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52463+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
52464+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52465+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52466+#endif
52467+ ) {
52468+#endif
52469 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52470+#ifdef CONFIG_GRKERNSEC_PROC_USER
52471+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52472+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52473+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52474+#endif
52475 task_dumpable(task)) {
52476- cred = __task_cred(task);
52477 stat->uid = cred->euid;
52478+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52479+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
52480+#else
52481 stat->gid = cred->egid;
52482+#endif
52483 }
52484+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52485+ } else {
52486+ rcu_read_unlock();
52487+ return -ENOENT;
52488+ }
52489+#endif
52490 }
52491 rcu_read_unlock();
52492 return 0;
52493@@ -1545,11 +1634,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
52494
52495 if (task) {
52496 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52497+#ifdef CONFIG_GRKERNSEC_PROC_USER
52498+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52499+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52500+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52501+#endif
52502 task_dumpable(task)) {
52503 rcu_read_lock();
52504 cred = __task_cred(task);
52505 inode->i_uid = cred->euid;
52506+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52507+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52508+#else
52509 inode->i_gid = cred->egid;
52510+#endif
52511 rcu_read_unlock();
52512 } else {
52513 inode->i_uid = 0;
52514@@ -1670,7 +1768,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
52515 int fd = proc_fd(inode);
52516
52517 if (task) {
52518- files = get_files_struct(task);
52519+ if (!gr_acl_handle_procpidmem(task))
52520+ files = get_files_struct(task);
52521 put_task_struct(task);
52522 }
52523 if (files) {
52524@@ -1922,12 +2021,22 @@ static const struct file_operations proc_fd_operations = {
52525 static int proc_fd_permission(struct inode *inode, int mask)
52526 {
52527 int rv;
52528+ struct task_struct *task;
52529
52530 rv = generic_permission(inode, mask, NULL);
52531- if (rv == 0)
52532- return 0;
52533+
52534 if (task_pid(current) == proc_pid(inode))
52535 rv = 0;
52536+
52537+ task = get_proc_task(inode);
52538+ if (task == NULL)
52539+ return rv;
52540+
52541+ if (gr_acl_handle_procpidmem(task))
52542+ rv = -EACCES;
52543+
52544+ put_task_struct(task);
52545+
52546 return rv;
52547 }
52548
52549@@ -2036,6 +2145,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
52550 if (!task)
52551 goto out_no_task;
52552
52553+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52554+ goto out;
52555+
52556 /*
52557 * Yes, it does not scale. And it should not. Don't add
52558 * new entries into /proc/<tgid>/ without very good reasons.
52559@@ -2080,6 +2192,9 @@ static int proc_pident_readdir(struct file *filp,
52560 if (!task)
52561 goto out_no_task;
52562
52563+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52564+ goto out;
52565+
52566 ret = 0;
52567 i = filp->f_pos;
52568 switch (i) {
52569@@ -2347,7 +2462,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
52570 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
52571 void *cookie)
52572 {
52573- char *s = nd_get_link(nd);
52574+ const char *s = nd_get_link(nd);
52575 if (!IS_ERR(s))
52576 __putname(s);
52577 }
52578@@ -2553,7 +2668,7 @@ static const struct pid_entry tgid_base_stuff[] = {
52579 #ifdef CONFIG_SCHED_DEBUG
52580 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
52581 #endif
52582-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52583+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52584 INF("syscall", S_IRUGO, proc_pid_syscall),
52585 #endif
52586 INF("cmdline", S_IRUGO, proc_pid_cmdline),
52587@@ -2578,10 +2693,10 @@ static const struct pid_entry tgid_base_stuff[] = {
52588 #ifdef CONFIG_SECURITY
52589 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
52590 #endif
52591-#ifdef CONFIG_KALLSYMS
52592+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52593 INF("wchan", S_IRUGO, proc_pid_wchan),
52594 #endif
52595-#ifdef CONFIG_STACKTRACE
52596+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52597 ONE("stack", S_IRUGO, proc_pid_stack),
52598 #endif
52599 #ifdef CONFIG_SCHEDSTATS
52600@@ -2611,6 +2726,9 @@ static const struct pid_entry tgid_base_stuff[] = {
52601 #ifdef CONFIG_TASK_IO_ACCOUNTING
52602 INF("io", S_IRUSR, proc_tgid_io_accounting),
52603 #endif
52604+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52605+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
52606+#endif
52607 };
52608
52609 static int proc_tgid_base_readdir(struct file * filp,
52610@@ -2735,7 +2853,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
52611 if (!inode)
52612 goto out;
52613
52614+#ifdef CONFIG_GRKERNSEC_PROC_USER
52615+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
52616+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52617+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52618+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
52619+#else
52620 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
52621+#endif
52622 inode->i_op = &proc_tgid_base_inode_operations;
52623 inode->i_fop = &proc_tgid_base_operations;
52624 inode->i_flags|=S_IMMUTABLE;
52625@@ -2777,7 +2902,14 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
52626 if (!task)
52627 goto out;
52628
52629+ if (!has_group_leader_pid(task))
52630+ goto out_put_task;
52631+
52632+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52633+ goto out_put_task;
52634+
52635 result = proc_pid_instantiate(dir, dentry, task, NULL);
52636+out_put_task:
52637 put_task_struct(task);
52638 out:
52639 return result;
52640@@ -2842,6 +2974,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
52641 {
52642 unsigned int nr;
52643 struct task_struct *reaper;
52644+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52645+ const struct cred *tmpcred = current_cred();
52646+ const struct cred *itercred;
52647+#endif
52648+ filldir_t __filldir = filldir;
52649 struct tgid_iter iter;
52650 struct pid_namespace *ns;
52651
52652@@ -2865,8 +3002,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
52653 for (iter = next_tgid(ns, iter);
52654 iter.task;
52655 iter.tgid += 1, iter = next_tgid(ns, iter)) {
52656+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52657+ rcu_read_lock();
52658+ itercred = __task_cred(iter.task);
52659+#endif
52660+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
52661+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52662+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
52663+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52664+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52665+#endif
52666+ )
52667+#endif
52668+ )
52669+ __filldir = &gr_fake_filldir;
52670+ else
52671+ __filldir = filldir;
52672+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52673+ rcu_read_unlock();
52674+#endif
52675 filp->f_pos = iter.tgid + TGID_OFFSET;
52676- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
52677+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
52678 put_task_struct(iter.task);
52679 goto out;
52680 }
52681@@ -2892,7 +3048,7 @@ static const struct pid_entry tid_base_stuff[] = {
52682 #ifdef CONFIG_SCHED_DEBUG
52683 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
52684 #endif
52685-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52686+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52687 INF("syscall", S_IRUGO, proc_pid_syscall),
52688 #endif
52689 INF("cmdline", S_IRUGO, proc_pid_cmdline),
52690@@ -2916,10 +3072,10 @@ static const struct pid_entry tid_base_stuff[] = {
52691 #ifdef CONFIG_SECURITY
52692 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
52693 #endif
52694-#ifdef CONFIG_KALLSYMS
52695+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52696 INF("wchan", S_IRUGO, proc_pid_wchan),
52697 #endif
52698-#ifdef CONFIG_STACKTRACE
52699+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52700 ONE("stack", S_IRUGO, proc_pid_stack),
52701 #endif
52702 #ifdef CONFIG_SCHEDSTATS
52703diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
52704index 82676e3..5f8518a 100644
52705--- a/fs/proc/cmdline.c
52706+++ b/fs/proc/cmdline.c
52707@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
52708
52709 static int __init proc_cmdline_init(void)
52710 {
52711+#ifdef CONFIG_GRKERNSEC_PROC_ADD
52712+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
52713+#else
52714 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
52715+#endif
52716 return 0;
52717 }
52718 module_init(proc_cmdline_init);
52719diff --git a/fs/proc/devices.c b/fs/proc/devices.c
52720index 59ee7da..469b4b6 100644
52721--- a/fs/proc/devices.c
52722+++ b/fs/proc/devices.c
52723@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
52724
52725 static int __init proc_devices_init(void)
52726 {
52727+#ifdef CONFIG_GRKERNSEC_PROC_ADD
52728+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
52729+#else
52730 proc_create("devices", 0, NULL, &proc_devinfo_operations);
52731+#endif
52732 return 0;
52733 }
52734 module_init(proc_devices_init);
52735diff --git a/fs/proc/inode.c b/fs/proc/inode.c
52736index d78ade3..81767f9 100644
52737--- a/fs/proc/inode.c
52738+++ b/fs/proc/inode.c
52739@@ -18,12 +18,19 @@
52740 #include <linux/module.h>
52741 #include <linux/smp_lock.h>
52742 #include <linux/sysctl.h>
52743+#include <linux/grsecurity.h>
52744
52745 #include <asm/system.h>
52746 #include <asm/uaccess.h>
52747
52748 #include "internal.h"
52749
52750+#ifdef CONFIG_PROC_SYSCTL
52751+extern const struct inode_operations proc_sys_inode_operations;
52752+extern const struct inode_operations proc_sys_dir_operations;
52753+#endif
52754+
52755+
52756 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
52757 {
52758 atomic_inc(&de->count);
52759@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
52760 de_put(de);
52761 if (PROC_I(inode)->sysctl)
52762 sysctl_head_put(PROC_I(inode)->sysctl);
52763+
52764+#ifdef CONFIG_PROC_SYSCTL
52765+ if (inode->i_op == &proc_sys_inode_operations ||
52766+ inode->i_op == &proc_sys_dir_operations)
52767+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
52768+#endif
52769+
52770 clear_inode(inode);
52771 }
52772
52773@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
52774 if (de->mode) {
52775 inode->i_mode = de->mode;
52776 inode->i_uid = de->uid;
52777+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52778+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52779+#else
52780 inode->i_gid = de->gid;
52781+#endif
52782 }
52783 if (de->size)
52784 inode->i_size = de->size;
52785diff --git a/fs/proc/internal.h b/fs/proc/internal.h
52786index 753ca37..26bcf3b 100644
52787--- a/fs/proc/internal.h
52788+++ b/fs/proc/internal.h
52789@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52790 struct pid *pid, struct task_struct *task);
52791 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52792 struct pid *pid, struct task_struct *task);
52793+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52794+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
52795+#endif
52796 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
52797
52798 extern const struct file_operations proc_maps_operations;
52799diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
52800index b442dac..aab29cb 100644
52801--- a/fs/proc/kcore.c
52802+++ b/fs/proc/kcore.c
52803@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
52804 off_t offset = 0;
52805 struct kcore_list *m;
52806
52807+ pax_track_stack();
52808+
52809 /* setup ELF header */
52810 elf = (struct elfhdr *) bufp;
52811 bufp += sizeof(struct elfhdr);
52812@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52813 * the addresses in the elf_phdr on our list.
52814 */
52815 start = kc_offset_to_vaddr(*fpos - elf_buflen);
52816- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
52817+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
52818+ if (tsz > buflen)
52819 tsz = buflen;
52820-
52821+
52822 while (buflen) {
52823 struct kcore_list *m;
52824
52825@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52826 kfree(elf_buf);
52827 } else {
52828 if (kern_addr_valid(start)) {
52829- unsigned long n;
52830-
52831- n = copy_to_user(buffer, (char *)start, tsz);
52832- /*
52833- * We cannot distingush between fault on source
52834- * and fault on destination. When this happens
52835- * we clear too and hope it will trigger the
52836- * EFAULT again.
52837- */
52838- if (n) {
52839- if (clear_user(buffer + tsz - n,
52840- n))
52841+ char *elf_buf;
52842+ mm_segment_t oldfs;
52843+
52844+ elf_buf = kmalloc(tsz, GFP_KERNEL);
52845+ if (!elf_buf)
52846+ return -ENOMEM;
52847+ oldfs = get_fs();
52848+ set_fs(KERNEL_DS);
52849+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
52850+ set_fs(oldfs);
52851+ if (copy_to_user(buffer, elf_buf, tsz)) {
52852+ kfree(elf_buf);
52853 return -EFAULT;
52854+ }
52855 }
52856+ set_fs(oldfs);
52857+ kfree(elf_buf);
52858 } else {
52859 if (clear_user(buffer, tsz))
52860 return -EFAULT;
52861@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
52862
52863 static int open_kcore(struct inode *inode, struct file *filp)
52864 {
52865+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
52866+ return -EPERM;
52867+#endif
52868 if (!capable(CAP_SYS_RAWIO))
52869 return -EPERM;
52870 if (kcore_need_update)
52871diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
52872index a65239c..ad1182a 100644
52873--- a/fs/proc/meminfo.c
52874+++ b/fs/proc/meminfo.c
52875@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
52876 unsigned long pages[NR_LRU_LISTS];
52877 int lru;
52878
52879+ pax_track_stack();
52880+
52881 /*
52882 * display in kilobytes.
52883 */
52884@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
52885 vmi.used >> 10,
52886 vmi.largest_chunk >> 10
52887 #ifdef CONFIG_MEMORY_FAILURE
52888- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
52889+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
52890 #endif
52891 );
52892
52893diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
52894index 9fe7d7e..cdb62c9 100644
52895--- a/fs/proc/nommu.c
52896+++ b/fs/proc/nommu.c
52897@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
52898 if (len < 1)
52899 len = 1;
52900 seq_printf(m, "%*c", len, ' ');
52901- seq_path(m, &file->f_path, "");
52902+ seq_path(m, &file->f_path, "\n\\");
52903 }
52904
52905 seq_putc(m, '\n');
52906diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
52907index 04d1270..25e1173 100644
52908--- a/fs/proc/proc_net.c
52909+++ b/fs/proc/proc_net.c
52910@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
52911 struct task_struct *task;
52912 struct nsproxy *ns;
52913 struct net *net = NULL;
52914+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52915+ const struct cred *cred = current_cred();
52916+#endif
52917+
52918+#ifdef CONFIG_GRKERNSEC_PROC_USER
52919+ if (cred->fsuid)
52920+ return net;
52921+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52922+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
52923+ return net;
52924+#endif
52925
52926 rcu_read_lock();
52927 task = pid_task(proc_pid(dir), PIDTYPE_PID);
52928diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
52929index f667e8a..55f4d96 100644
52930--- a/fs/proc/proc_sysctl.c
52931+++ b/fs/proc/proc_sysctl.c
52932@@ -7,11 +7,13 @@
52933 #include <linux/security.h>
52934 #include "internal.h"
52935
52936+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
52937+
52938 static const struct dentry_operations proc_sys_dentry_operations;
52939 static const struct file_operations proc_sys_file_operations;
52940-static const struct inode_operations proc_sys_inode_operations;
52941+const struct inode_operations proc_sys_inode_operations;
52942 static const struct file_operations proc_sys_dir_file_operations;
52943-static const struct inode_operations proc_sys_dir_operations;
52944+const struct inode_operations proc_sys_dir_operations;
52945
52946 static struct inode *proc_sys_make_inode(struct super_block *sb,
52947 struct ctl_table_header *head, struct ctl_table *table)
52948@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
52949 if (!p)
52950 goto out;
52951
52952+ if (gr_handle_sysctl(p, MAY_EXEC))
52953+ goto out;
52954+
52955 err = ERR_PTR(-ENOMEM);
52956 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
52957 if (h)
52958@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
52959
52960 err = NULL;
52961 dentry->d_op = &proc_sys_dentry_operations;
52962+
52963+ gr_handle_proc_create(dentry, inode);
52964+
52965 d_add(dentry, inode);
52966
52967 out:
52968@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
52969 return -ENOMEM;
52970 } else {
52971 child->d_op = &proc_sys_dentry_operations;
52972+
52973+ gr_handle_proc_create(child, inode);
52974+
52975 d_add(child, inode);
52976 }
52977 } else {
52978@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
52979 if (*pos < file->f_pos)
52980 continue;
52981
52982+ if (gr_handle_sysctl(table, 0))
52983+ continue;
52984+
52985 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
52986 if (res)
52987 return res;
52988@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
52989 if (IS_ERR(head))
52990 return PTR_ERR(head);
52991
52992+ if (table && gr_handle_sysctl(table, MAY_EXEC))
52993+ return -ENOENT;
52994+
52995 generic_fillattr(inode, stat);
52996 if (table)
52997 stat->mode = (stat->mode & S_IFMT) | table->mode;
52998@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
52999 };
53000
53001 static const struct file_operations proc_sys_dir_file_operations = {
53002+ .read = generic_read_dir,
53003 .readdir = proc_sys_readdir,
53004 .llseek = generic_file_llseek,
53005 };
53006
53007-static const struct inode_operations proc_sys_inode_operations = {
53008+const struct inode_operations proc_sys_inode_operations = {
53009 .permission = proc_sys_permission,
53010 .setattr = proc_sys_setattr,
53011 .getattr = proc_sys_getattr,
53012 };
53013
53014-static const struct inode_operations proc_sys_dir_operations = {
53015+const struct inode_operations proc_sys_dir_operations = {
53016 .lookup = proc_sys_lookup,
53017 .permission = proc_sys_permission,
53018 .setattr = proc_sys_setattr,
53019diff --git a/fs/proc/root.c b/fs/proc/root.c
53020index b080b79..d957e63 100644
53021--- a/fs/proc/root.c
53022+++ b/fs/proc/root.c
53023@@ -134,7 +134,15 @@ void __init proc_root_init(void)
53024 #ifdef CONFIG_PROC_DEVICETREE
53025 proc_device_tree_init();
53026 #endif
53027+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53028+#ifdef CONFIG_GRKERNSEC_PROC_USER
53029+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53030+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53031+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53032+#endif
53033+#else
53034 proc_mkdir("bus", NULL);
53035+#endif
53036 proc_sys_init();
53037 }
53038
53039diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53040index 3b7b82a..7dbb571 100644
53041--- a/fs/proc/task_mmu.c
53042+++ b/fs/proc/task_mmu.c
53043@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53044 "VmStk:\t%8lu kB\n"
53045 "VmExe:\t%8lu kB\n"
53046 "VmLib:\t%8lu kB\n"
53047- "VmPTE:\t%8lu kB\n",
53048- hiwater_vm << (PAGE_SHIFT-10),
53049+ "VmPTE:\t%8lu kB\n"
53050+
53051+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53052+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53053+#endif
53054+
53055+ ,hiwater_vm << (PAGE_SHIFT-10),
53056 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53057 mm->locked_vm << (PAGE_SHIFT-10),
53058 hiwater_rss << (PAGE_SHIFT-10),
53059 total_rss << (PAGE_SHIFT-10),
53060 data << (PAGE_SHIFT-10),
53061 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53062- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53063+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53064+
53065+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53066+ , mm->context.user_cs_base, mm->context.user_cs_limit
53067+#endif
53068+
53069+ );
53070 }
53071
53072 unsigned long task_vsize(struct mm_struct *mm)
53073@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, void *v)
53074 struct proc_maps_private *priv = m->private;
53075 struct vm_area_struct *vma = v;
53076
53077- vma_stop(priv, vma);
53078+ if (!IS_ERR(vma))
53079+ vma_stop(priv, vma);
53080 if (priv->task)
53081 put_task_struct(priv->task);
53082 }
53083@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53084 return ret;
53085 }
53086
53087+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53088+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53089+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53090+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53091+#endif
53092+
53093 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53094 {
53095 struct mm_struct *mm = vma->vm_mm;
53096@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53097 int flags = vma->vm_flags;
53098 unsigned long ino = 0;
53099 unsigned long long pgoff = 0;
53100- unsigned long start;
53101 dev_t dev = 0;
53102 int len;
53103
53104@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53105 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53106 }
53107
53108- /* We don't show the stack guard page in /proc/maps */
53109- start = vma->vm_start;
53110- if (vma->vm_flags & VM_GROWSDOWN)
53111- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53112- start += PAGE_SIZE;
53113-
53114 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53115- start,
53116+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53117+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53118+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53119+#else
53120+ vma->vm_start,
53121 vma->vm_end,
53122+#endif
53123 flags & VM_READ ? 'r' : '-',
53124 flags & VM_WRITE ? 'w' : '-',
53125 flags & VM_EXEC ? 'x' : '-',
53126 flags & VM_MAYSHARE ? 's' : 'p',
53127+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53128+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53129+#else
53130 pgoff,
53131+#endif
53132 MAJOR(dev), MINOR(dev), ino, &len);
53133
53134 /*
53135@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53136 */
53137 if (file) {
53138 pad_len_spaces(m, len);
53139- seq_path(m, &file->f_path, "\n");
53140+ seq_path(m, &file->f_path, "\n\\");
53141 } else {
53142 const char *name = arch_vma_name(vma);
53143 if (!name) {
53144@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53145 if (vma->vm_start <= mm->brk &&
53146 vma->vm_end >= mm->start_brk) {
53147 name = "[heap]";
53148- } else if (vma->vm_start <= mm->start_stack &&
53149- vma->vm_end >= mm->start_stack) {
53150+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53151+ (vma->vm_start <= mm->start_stack &&
53152+ vma->vm_end >= mm->start_stack)) {
53153 name = "[stack]";
53154 }
53155 } else {
53156@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m, void *v)
53157 };
53158
53159 memset(&mss, 0, sizeof mss);
53160- mss.vma = vma;
53161- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53162- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53163+
53164+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53165+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53166+#endif
53167+ mss.vma = vma;
53168+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53169+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53170+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53171+ }
53172+#endif
53173
53174 show_map_vma(m, vma);
53175
53176@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m, void *v)
53177 "Swap: %8lu kB\n"
53178 "KernelPageSize: %8lu kB\n"
53179 "MMUPageSize: %8lu kB\n",
53180+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53181+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
53182+#else
53183 (vma->vm_end - vma->vm_start) >> 10,
53184+#endif
53185 mss.resident >> 10,
53186 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
53187 mss.shared_clean >> 10,
53188diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
53189index 8f5c05d..c99c76d 100644
53190--- a/fs/proc/task_nommu.c
53191+++ b/fs/proc/task_nommu.c
53192@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53193 else
53194 bytes += kobjsize(mm);
53195
53196- if (current->fs && current->fs->users > 1)
53197+ if (current->fs && atomic_read(&current->fs->users) > 1)
53198 sbytes += kobjsize(current->fs);
53199 else
53200 bytes += kobjsize(current->fs);
53201@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
53202 if (len < 1)
53203 len = 1;
53204 seq_printf(m, "%*c", len, ' ');
53205- seq_path(m, &file->f_path, "");
53206+ seq_path(m, &file->f_path, "\n\\");
53207 }
53208
53209 seq_putc(m, '\n');
53210diff --git a/fs/readdir.c b/fs/readdir.c
53211index 7723401..30059a6 100644
53212--- a/fs/readdir.c
53213+++ b/fs/readdir.c
53214@@ -16,6 +16,7 @@
53215 #include <linux/security.h>
53216 #include <linux/syscalls.h>
53217 #include <linux/unistd.h>
53218+#include <linux/namei.h>
53219
53220 #include <asm/uaccess.h>
53221
53222@@ -67,6 +68,7 @@ struct old_linux_dirent {
53223
53224 struct readdir_callback {
53225 struct old_linux_dirent __user * dirent;
53226+ struct file * file;
53227 int result;
53228 };
53229
53230@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
53231 buf->result = -EOVERFLOW;
53232 return -EOVERFLOW;
53233 }
53234+
53235+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53236+ return 0;
53237+
53238 buf->result++;
53239 dirent = buf->dirent;
53240 if (!access_ok(VERIFY_WRITE, dirent,
53241@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
53242
53243 buf.result = 0;
53244 buf.dirent = dirent;
53245+ buf.file = file;
53246
53247 error = vfs_readdir(file, fillonedir, &buf);
53248 if (buf.result)
53249@@ -142,6 +149,7 @@ struct linux_dirent {
53250 struct getdents_callback {
53251 struct linux_dirent __user * current_dir;
53252 struct linux_dirent __user * previous;
53253+ struct file * file;
53254 int count;
53255 int error;
53256 };
53257@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
53258 buf->error = -EOVERFLOW;
53259 return -EOVERFLOW;
53260 }
53261+
53262+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53263+ return 0;
53264+
53265 dirent = buf->previous;
53266 if (dirent) {
53267 if (__put_user(offset, &dirent->d_off))
53268@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
53269 buf.previous = NULL;
53270 buf.count = count;
53271 buf.error = 0;
53272+ buf.file = file;
53273
53274 error = vfs_readdir(file, filldir, &buf);
53275 if (error >= 0)
53276@@ -228,6 +241,7 @@ out:
53277 struct getdents_callback64 {
53278 struct linux_dirent64 __user * current_dir;
53279 struct linux_dirent64 __user * previous;
53280+ struct file *file;
53281 int count;
53282 int error;
53283 };
53284@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
53285 buf->error = -EINVAL; /* only used if we fail.. */
53286 if (reclen > buf->count)
53287 return -EINVAL;
53288+
53289+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53290+ return 0;
53291+
53292 dirent = buf->previous;
53293 if (dirent) {
53294 if (__put_user(offset, &dirent->d_off))
53295@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53296
53297 buf.current_dir = dirent;
53298 buf.previous = NULL;
53299+ buf.file = file;
53300 buf.count = count;
53301 buf.error = 0;
53302
53303@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53304 error = buf.error;
53305 lastdirent = buf.previous;
53306 if (lastdirent) {
53307- typeof(lastdirent->d_off) d_off = file->f_pos;
53308+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
53309 if (__put_user(d_off, &lastdirent->d_off))
53310 error = -EFAULT;
53311 else
53312diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
53313index d42c30c..4fd8718 100644
53314--- a/fs/reiserfs/dir.c
53315+++ b/fs/reiserfs/dir.c
53316@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
53317 struct reiserfs_dir_entry de;
53318 int ret = 0;
53319
53320+ pax_track_stack();
53321+
53322 reiserfs_write_lock(inode->i_sb);
53323
53324 reiserfs_check_lock_depth(inode->i_sb, "readdir");
53325diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
53326index 128d3f7..8840d44 100644
53327--- a/fs/reiserfs/do_balan.c
53328+++ b/fs/reiserfs/do_balan.c
53329@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
53330 return;
53331 }
53332
53333- atomic_inc(&(fs_generation(tb->tb_sb)));
53334+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
53335 do_balance_starts(tb);
53336
53337 /* balance leaf returns 0 except if combining L R and S into
53338diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
53339index 72cb1cc..d0e3181 100644
53340--- a/fs/reiserfs/item_ops.c
53341+++ b/fs/reiserfs/item_ops.c
53342@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
53343 vi->vi_index, vi->vi_type, vi->vi_ih);
53344 }
53345
53346-static struct item_operations stat_data_ops = {
53347+static const struct item_operations stat_data_ops = {
53348 .bytes_number = sd_bytes_number,
53349 .decrement_key = sd_decrement_key,
53350 .is_left_mergeable = sd_is_left_mergeable,
53351@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
53352 vi->vi_index, vi->vi_type, vi->vi_ih);
53353 }
53354
53355-static struct item_operations direct_ops = {
53356+static const struct item_operations direct_ops = {
53357 .bytes_number = direct_bytes_number,
53358 .decrement_key = direct_decrement_key,
53359 .is_left_mergeable = direct_is_left_mergeable,
53360@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
53361 vi->vi_index, vi->vi_type, vi->vi_ih);
53362 }
53363
53364-static struct item_operations indirect_ops = {
53365+static const struct item_operations indirect_ops = {
53366 .bytes_number = indirect_bytes_number,
53367 .decrement_key = indirect_decrement_key,
53368 .is_left_mergeable = indirect_is_left_mergeable,
53369@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
53370 printk("\n");
53371 }
53372
53373-static struct item_operations direntry_ops = {
53374+static const struct item_operations direntry_ops = {
53375 .bytes_number = direntry_bytes_number,
53376 .decrement_key = direntry_decrement_key,
53377 .is_left_mergeable = direntry_is_left_mergeable,
53378@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
53379 "Invalid item type observed, run fsck ASAP");
53380 }
53381
53382-static struct item_operations errcatch_ops = {
53383+static const struct item_operations errcatch_ops = {
53384 errcatch_bytes_number,
53385 errcatch_decrement_key,
53386 errcatch_is_left_mergeable,
53387@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
53388 #error Item types must use disk-format assigned values.
53389 #endif
53390
53391-struct item_operations *item_ops[TYPE_ANY + 1] = {
53392+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
53393 &stat_data_ops,
53394 &indirect_ops,
53395 &direct_ops,
53396diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
53397index b5fe0aa..e0e25c4 100644
53398--- a/fs/reiserfs/journal.c
53399+++ b/fs/reiserfs/journal.c
53400@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
53401 struct buffer_head *bh;
53402 int i, j;
53403
53404+ pax_track_stack();
53405+
53406 bh = __getblk(dev, block, bufsize);
53407 if (buffer_uptodate(bh))
53408 return (bh);
53409diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
53410index 2715791..b8996db 100644
53411--- a/fs/reiserfs/namei.c
53412+++ b/fs/reiserfs/namei.c
53413@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
53414 unsigned long savelink = 1;
53415 struct timespec ctime;
53416
53417+ pax_track_stack();
53418+
53419 /* three balancings: (1) old name removal, (2) new name insertion
53420 and (3) maybe "save" link insertion
53421 stat data updates: (1) old directory,
53422diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
53423index 9229e55..3d2e3b7 100644
53424--- a/fs/reiserfs/procfs.c
53425+++ b/fs/reiserfs/procfs.c
53426@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
53427 "SMALL_TAILS " : "NO_TAILS ",
53428 replay_only(sb) ? "REPLAY_ONLY " : "",
53429 convert_reiserfs(sb) ? "CONV " : "",
53430- atomic_read(&r->s_generation_counter),
53431+ atomic_read_unchecked(&r->s_generation_counter),
53432 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
53433 SF(s_do_balance), SF(s_unneeded_left_neighbor),
53434 SF(s_good_search_by_key_reada), SF(s_bmaps),
53435@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
53436 struct journal_params *jp = &rs->s_v1.s_journal;
53437 char b[BDEVNAME_SIZE];
53438
53439+ pax_track_stack();
53440+
53441 seq_printf(m, /* on-disk fields */
53442 "jp_journal_1st_block: \t%i\n"
53443 "jp_journal_dev: \t%s[%x]\n"
53444diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
53445index d036ee5..4c7dca1 100644
53446--- a/fs/reiserfs/stree.c
53447+++ b/fs/reiserfs/stree.c
53448@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
53449 int iter = 0;
53450 #endif
53451
53452+ pax_track_stack();
53453+
53454 BUG_ON(!th->t_trans_id);
53455
53456 init_tb_struct(th, &s_del_balance, sb, path,
53457@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
53458 int retval;
53459 int quota_cut_bytes = 0;
53460
53461+ pax_track_stack();
53462+
53463 BUG_ON(!th->t_trans_id);
53464
53465 le_key2cpu_key(&cpu_key, key);
53466@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
53467 int quota_cut_bytes;
53468 loff_t tail_pos = 0;
53469
53470+ pax_track_stack();
53471+
53472 BUG_ON(!th->t_trans_id);
53473
53474 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
53475@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
53476 int retval;
53477 int fs_gen;
53478
53479+ pax_track_stack();
53480+
53481 BUG_ON(!th->t_trans_id);
53482
53483 fs_gen = get_generation(inode->i_sb);
53484@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
53485 int fs_gen = 0;
53486 int quota_bytes = 0;
53487
53488+ pax_track_stack();
53489+
53490 BUG_ON(!th->t_trans_id);
53491
53492 if (inode) { /* Do we count quotas for item? */
53493diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
53494index f0ad05f..af3306f 100644
53495--- a/fs/reiserfs/super.c
53496+++ b/fs/reiserfs/super.c
53497@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
53498 {.option_name = NULL}
53499 };
53500
53501+ pax_track_stack();
53502+
53503 *blocks = 0;
53504 if (!options || !*options)
53505 /* use default configuration: create tails, journaling on, no
53506diff --git a/fs/select.c b/fs/select.c
53507index fd38ce2..f5381b8 100644
53508--- a/fs/select.c
53509+++ b/fs/select.c
53510@@ -20,6 +20,7 @@
53511 #include <linux/module.h>
53512 #include <linux/slab.h>
53513 #include <linux/poll.h>
53514+#include <linux/security.h>
53515 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
53516 #include <linux/file.h>
53517 #include <linux/fdtable.h>
53518@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
53519 int retval, i, timed_out = 0;
53520 unsigned long slack = 0;
53521
53522+ pax_track_stack();
53523+
53524 rcu_read_lock();
53525 retval = max_select_fd(n, fds);
53526 rcu_read_unlock();
53527@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
53528 /* Allocate small arguments on the stack to save memory and be faster */
53529 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
53530
53531+ pax_track_stack();
53532+
53533 ret = -EINVAL;
53534 if (n < 0)
53535 goto out_nofds;
53536@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
53537 struct poll_list *walk = head;
53538 unsigned long todo = nfds;
53539
53540+ pax_track_stack();
53541+
53542+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
53543 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
53544 return -EINVAL;
53545
53546diff --git a/fs/seq_file.c b/fs/seq_file.c
53547index eae7d9d..679f099 100644
53548--- a/fs/seq_file.c
53549+++ b/fs/seq_file.c
53550@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
53551 return 0;
53552 }
53553 if (!m->buf) {
53554- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
53555+ m->size = PAGE_SIZE;
53556+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
53557 if (!m->buf)
53558 return -ENOMEM;
53559 }
53560@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
53561 Eoverflow:
53562 m->op->stop(m, p);
53563 kfree(m->buf);
53564- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
53565+ m->size <<= 1;
53566+ m->buf = kmalloc(m->size, GFP_KERNEL);
53567 return !m->buf ? -ENOMEM : -EAGAIN;
53568 }
53569
53570@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
53571 m->version = file->f_version;
53572 /* grab buffer if we didn't have one */
53573 if (!m->buf) {
53574- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
53575+ m->size = PAGE_SIZE;
53576+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
53577 if (!m->buf)
53578 goto Enomem;
53579 }
53580@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
53581 goto Fill;
53582 m->op->stop(m, p);
53583 kfree(m->buf);
53584- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
53585+ m->size <<= 1;
53586+ m->buf = kmalloc(m->size, GFP_KERNEL);
53587 if (!m->buf)
53588 goto Enomem;
53589 m->count = 0;
53590@@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
53591 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
53592 void *data)
53593 {
53594- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
53595+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
53596 int res = -ENOMEM;
53597
53598 if (op) {
53599diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
53600index 71c29b6..54694dd 100644
53601--- a/fs/smbfs/proc.c
53602+++ b/fs/smbfs/proc.c
53603@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
53604
53605 out:
53606 if (server->local_nls != NULL && server->remote_nls != NULL)
53607- server->ops->convert = convert_cp;
53608+ *(void **)&server->ops->convert = convert_cp;
53609 else
53610- server->ops->convert = convert_memcpy;
53611+ *(void **)&server->ops->convert = convert_memcpy;
53612
53613 smb_unlock_server(server);
53614 return n;
53615@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
53616
53617 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
53618 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
53619- server->ops->getattr = smb_proc_getattr_core;
53620+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
53621 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
53622- server->ops->getattr = smb_proc_getattr_ff;
53623+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
53624 }
53625
53626 /* Decode server capabilities */
53627@@ -3439,7 +3439,7 @@ out:
53628 static void
53629 install_ops(struct smb_ops *dst, struct smb_ops *src)
53630 {
53631- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
53632+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
53633 }
53634
53635 /* < LANMAN2 */
53636diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
53637index 00b2909..2ace383 100644
53638--- a/fs/smbfs/symlink.c
53639+++ b/fs/smbfs/symlink.c
53640@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
53641
53642 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53643 {
53644- char *s = nd_get_link(nd);
53645+ const char *s = nd_get_link(nd);
53646 if (!IS_ERR(s))
53647 __putname(s);
53648 }
53649diff --git a/fs/splice.c b/fs/splice.c
53650index bb92b7c..5aa72b0 100644
53651--- a/fs/splice.c
53652+++ b/fs/splice.c
53653@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
53654 pipe_lock(pipe);
53655
53656 for (;;) {
53657- if (!pipe->readers) {
53658+ if (!atomic_read(&pipe->readers)) {
53659 send_sig(SIGPIPE, current, 0);
53660 if (!ret)
53661 ret = -EPIPE;
53662@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
53663 do_wakeup = 0;
53664 }
53665
53666- pipe->waiting_writers++;
53667+ atomic_inc(&pipe->waiting_writers);
53668 pipe_wait(pipe);
53669- pipe->waiting_writers--;
53670+ atomic_dec(&pipe->waiting_writers);
53671 }
53672
53673 pipe_unlock(pipe);
53674@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
53675 .spd_release = spd_release_page,
53676 };
53677
53678+ pax_track_stack();
53679+
53680 index = *ppos >> PAGE_CACHE_SHIFT;
53681 loff = *ppos & ~PAGE_CACHE_MASK;
53682 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
53683@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
53684 old_fs = get_fs();
53685 set_fs(get_ds());
53686 /* The cast to a user pointer is valid due to the set_fs() */
53687- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
53688+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
53689 set_fs(old_fs);
53690
53691 return res;
53692@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
53693 old_fs = get_fs();
53694 set_fs(get_ds());
53695 /* The cast to a user pointer is valid due to the set_fs() */
53696- res = vfs_write(file, (const char __user *)buf, count, &pos);
53697+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
53698 set_fs(old_fs);
53699
53700 return res;
53701@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
53702 .spd_release = spd_release_page,
53703 };
53704
53705+ pax_track_stack();
53706+
53707 index = *ppos >> PAGE_CACHE_SHIFT;
53708 offset = *ppos & ~PAGE_CACHE_MASK;
53709 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
53710@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
53711 goto err;
53712
53713 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
53714- vec[i].iov_base = (void __user *) page_address(page);
53715+ vec[i].iov_base = (__force void __user *) page_address(page);
53716 vec[i].iov_len = this_len;
53717 pages[i] = page;
53718 spd.nr_pages++;
53719@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
53720 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
53721 {
53722 while (!pipe->nrbufs) {
53723- if (!pipe->writers)
53724+ if (!atomic_read(&pipe->writers))
53725 return 0;
53726
53727- if (!pipe->waiting_writers && sd->num_spliced)
53728+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
53729 return 0;
53730
53731 if (sd->flags & SPLICE_F_NONBLOCK)
53732@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
53733 * out of the pipe right after the splice_to_pipe(). So set
53734 * PIPE_READERS appropriately.
53735 */
53736- pipe->readers = 1;
53737+ atomic_set(&pipe->readers, 1);
53738
53739 current->splice_pipe = pipe;
53740 }
53741@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
53742 .spd_release = spd_release_page,
53743 };
53744
53745+ pax_track_stack();
53746+
53747 pipe = pipe_info(file->f_path.dentry->d_inode);
53748 if (!pipe)
53749 return -EBADF;
53750@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53751 ret = -ERESTARTSYS;
53752 break;
53753 }
53754- if (!pipe->writers)
53755+ if (!atomic_read(&pipe->writers))
53756 break;
53757- if (!pipe->waiting_writers) {
53758+ if (!atomic_read(&pipe->waiting_writers)) {
53759 if (flags & SPLICE_F_NONBLOCK) {
53760 ret = -EAGAIN;
53761 break;
53762@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53763 pipe_lock(pipe);
53764
53765 while (pipe->nrbufs >= PIPE_BUFFERS) {
53766- if (!pipe->readers) {
53767+ if (!atomic_read(&pipe->readers)) {
53768 send_sig(SIGPIPE, current, 0);
53769 ret = -EPIPE;
53770 break;
53771@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
53772 ret = -ERESTARTSYS;
53773 break;
53774 }
53775- pipe->waiting_writers++;
53776+ atomic_inc(&pipe->waiting_writers);
53777 pipe_wait(pipe);
53778- pipe->waiting_writers--;
53779+ atomic_dec(&pipe->waiting_writers);
53780 }
53781
53782 pipe_unlock(pipe);
53783@@ -1786,14 +1792,14 @@ retry:
53784 pipe_double_lock(ipipe, opipe);
53785
53786 do {
53787- if (!opipe->readers) {
53788+ if (!atomic_read(&opipe->readers)) {
53789 send_sig(SIGPIPE, current, 0);
53790 if (!ret)
53791 ret = -EPIPE;
53792 break;
53793 }
53794
53795- if (!ipipe->nrbufs && !ipipe->writers)
53796+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
53797 break;
53798
53799 /*
53800@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
53801 pipe_double_lock(ipipe, opipe);
53802
53803 do {
53804- if (!opipe->readers) {
53805+ if (!atomic_read(&opipe->readers)) {
53806 send_sig(SIGPIPE, current, 0);
53807 if (!ret)
53808 ret = -EPIPE;
53809@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
53810 * return EAGAIN if we have the potential of some data in the
53811 * future, otherwise just return 0
53812 */
53813- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
53814+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
53815 ret = -EAGAIN;
53816
53817 pipe_unlock(ipipe);
53818diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
53819index 7118a38..70af853 100644
53820--- a/fs/sysfs/file.c
53821+++ b/fs/sysfs/file.c
53822@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
53823
53824 struct sysfs_open_dirent {
53825 atomic_t refcnt;
53826- atomic_t event;
53827+ atomic_unchecked_t event;
53828 wait_queue_head_t poll;
53829 struct list_head buffers; /* goes through sysfs_buffer.list */
53830 };
53831@@ -53,7 +53,7 @@ struct sysfs_buffer {
53832 size_t count;
53833 loff_t pos;
53834 char * page;
53835- struct sysfs_ops * ops;
53836+ const struct sysfs_ops * ops;
53837 struct mutex mutex;
53838 int needs_read_fill;
53839 int event;
53840@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
53841 {
53842 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
53843 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53844- struct sysfs_ops * ops = buffer->ops;
53845+ const struct sysfs_ops * ops = buffer->ops;
53846 int ret = 0;
53847 ssize_t count;
53848
53849@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
53850 if (!sysfs_get_active_two(attr_sd))
53851 return -ENODEV;
53852
53853- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
53854+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
53855 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
53856
53857 sysfs_put_active_two(attr_sd);
53858@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
53859 {
53860 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
53861 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53862- struct sysfs_ops * ops = buffer->ops;
53863+ const struct sysfs_ops * ops = buffer->ops;
53864 int rc;
53865
53866 /* need attr_sd for attr and ops, its parent for kobj */
53867@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
53868 return -ENOMEM;
53869
53870 atomic_set(&new_od->refcnt, 0);
53871- atomic_set(&new_od->event, 1);
53872+ atomic_set_unchecked(&new_od->event, 1);
53873 init_waitqueue_head(&new_od->poll);
53874 INIT_LIST_HEAD(&new_od->buffers);
53875 goto retry;
53876@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
53877 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
53878 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
53879 struct sysfs_buffer *buffer;
53880- struct sysfs_ops *ops;
53881+ const struct sysfs_ops *ops;
53882 int error = -EACCES;
53883 char *p;
53884
53885@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
53886
53887 sysfs_put_active_two(attr_sd);
53888
53889- if (buffer->event != atomic_read(&od->event))
53890+ if (buffer->event != atomic_read_unchecked(&od->event))
53891 goto trigger;
53892
53893 return DEFAULT_POLLMASK;
53894@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
53895
53896 od = sd->s_attr.open;
53897 if (od) {
53898- atomic_inc(&od->event);
53899+ atomic_inc_unchecked(&od->event);
53900 wake_up_interruptible(&od->poll);
53901 }
53902
53903diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
53904index 4974995..c26609c 100644
53905--- a/fs/sysfs/mount.c
53906+++ b/fs/sysfs/mount.c
53907@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
53908 .s_name = "",
53909 .s_count = ATOMIC_INIT(1),
53910 .s_flags = SYSFS_DIR,
53911+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
53912+ .s_mode = S_IFDIR | S_IRWXU,
53913+#else
53914 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
53915+#endif
53916 .s_ino = 1,
53917 };
53918
53919diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
53920index c5081ad..342ea86 100644
53921--- a/fs/sysfs/symlink.c
53922+++ b/fs/sysfs/symlink.c
53923@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
53924
53925 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
53926 {
53927- char *page = nd_get_link(nd);
53928+ const char *page = nd_get_link(nd);
53929 if (!IS_ERR(page))
53930 free_page((unsigned long)page);
53931 }
53932diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
53933index 1e06853..b06d325 100644
53934--- a/fs/udf/balloc.c
53935+++ b/fs/udf/balloc.c
53936@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
53937
53938 mutex_lock(&sbi->s_alloc_mutex);
53939 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
53940- if (bloc->logicalBlockNum < 0 ||
53941- (bloc->logicalBlockNum + count) >
53942- partmap->s_partition_len) {
53943+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
53944 udf_debug("%d < %d || %d + %d > %d\n",
53945 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
53946 count, partmap->s_partition_len);
53947@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
53948
53949 mutex_lock(&sbi->s_alloc_mutex);
53950 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
53951- if (bloc->logicalBlockNum < 0 ||
53952- (bloc->logicalBlockNum + count) >
53953- partmap->s_partition_len) {
53954+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
53955 udf_debug("%d < %d || %d + %d > %d\n",
53956 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
53957 partmap->s_partition_len);
53958diff --git a/fs/udf/inode.c b/fs/udf/inode.c
53959index 6d24c2c..fff470f 100644
53960--- a/fs/udf/inode.c
53961+++ b/fs/udf/inode.c
53962@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
53963 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
53964 int lastblock = 0;
53965
53966+ pax_track_stack();
53967+
53968 prev_epos.offset = udf_file_entry_alloc_offset(inode);
53969 prev_epos.block = iinfo->i_location;
53970 prev_epos.bh = NULL;
53971diff --git a/fs/udf/misc.c b/fs/udf/misc.c
53972index 9215700..bf1f68e 100644
53973--- a/fs/udf/misc.c
53974+++ b/fs/udf/misc.c
53975@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
53976
53977 u8 udf_tag_checksum(const struct tag *t)
53978 {
53979- u8 *data = (u8 *)t;
53980+ const u8 *data = (const u8 *)t;
53981 u8 checksum = 0;
53982 int i;
53983 for (i = 0; i < sizeof(struct tag); ++i)
53984diff --git a/fs/utimes.c b/fs/utimes.c
53985index e4c75db..b4df0e0 100644
53986--- a/fs/utimes.c
53987+++ b/fs/utimes.c
53988@@ -1,6 +1,7 @@
53989 #include <linux/compiler.h>
53990 #include <linux/file.h>
53991 #include <linux/fs.h>
53992+#include <linux/security.h>
53993 #include <linux/linkage.h>
53994 #include <linux/mount.h>
53995 #include <linux/namei.h>
53996@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
53997 goto mnt_drop_write_and_out;
53998 }
53999 }
54000+
54001+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
54002+ error = -EACCES;
54003+ goto mnt_drop_write_and_out;
54004+ }
54005+
54006 mutex_lock(&inode->i_mutex);
54007 error = notify_change(path->dentry, &newattrs);
54008 mutex_unlock(&inode->i_mutex);
54009diff --git a/fs/xattr.c b/fs/xattr.c
54010index 6d4f6d3..cda3958 100644
54011--- a/fs/xattr.c
54012+++ b/fs/xattr.c
54013@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54014 * Extended attribute SET operations
54015 */
54016 static long
54017-setxattr(struct dentry *d, const char __user *name, const void __user *value,
54018+setxattr(struct path *path, const char __user *name, const void __user *value,
54019 size_t size, int flags)
54020 {
54021 int error;
54022@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54023 return PTR_ERR(kvalue);
54024 }
54025
54026- error = vfs_setxattr(d, kname, kvalue, size, flags);
54027+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54028+ error = -EACCES;
54029+ goto out;
54030+ }
54031+
54032+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54033+out:
54034 kfree(kvalue);
54035 return error;
54036 }
54037@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54038 return error;
54039 error = mnt_want_write(path.mnt);
54040 if (!error) {
54041- error = setxattr(path.dentry, name, value, size, flags);
54042+ error = setxattr(&path, name, value, size, flags);
54043 mnt_drop_write(path.mnt);
54044 }
54045 path_put(&path);
54046@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54047 return error;
54048 error = mnt_want_write(path.mnt);
54049 if (!error) {
54050- error = setxattr(path.dentry, name, value, size, flags);
54051+ error = setxattr(&path, name, value, size, flags);
54052 mnt_drop_write(path.mnt);
54053 }
54054 path_put(&path);
54055@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54056 const void __user *,value, size_t, size, int, flags)
54057 {
54058 struct file *f;
54059- struct dentry *dentry;
54060 int error = -EBADF;
54061
54062 f = fget(fd);
54063 if (!f)
54064 return error;
54065- dentry = f->f_path.dentry;
54066- audit_inode(NULL, dentry);
54067+ audit_inode(NULL, f->f_path.dentry);
54068 error = mnt_want_write_file(f);
54069 if (!error) {
54070- error = setxattr(dentry, name, value, size, flags);
54071+ error = setxattr(&f->f_path, name, value, size, flags);
54072 mnt_drop_write(f->f_path.mnt);
54073 }
54074 fput(f);
54075diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54076index c6ad7c7..f2847a7 100644
54077--- a/fs/xattr_acl.c
54078+++ b/fs/xattr_acl.c
54079@@ -17,8 +17,8 @@
54080 struct posix_acl *
54081 posix_acl_from_xattr(const void *value, size_t size)
54082 {
54083- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54084- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54085+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54086+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54087 int count;
54088 struct posix_acl *acl;
54089 struct posix_acl_entry *acl_e;
54090diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54091index 942362f..88f96f5 100644
54092--- a/fs/xfs/linux-2.6/xfs_ioctl.c
54093+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54094@@ -134,7 +134,7 @@ xfs_find_handle(
54095 }
54096
54097 error = -EFAULT;
54098- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54099+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54100 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54101 goto out_put;
54102
54103@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54104 if (IS_ERR(dentry))
54105 return PTR_ERR(dentry);
54106
54107- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54108+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54109 if (!kbuf)
54110 goto out_dput;
54111
54112@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54113 xfs_mount_t *mp,
54114 void __user *arg)
54115 {
54116- xfs_fsop_geom_t fsgeo;
54117+ xfs_fsop_geom_t fsgeo;
54118 int error;
54119
54120 error = xfs_fs_geometry(mp, &fsgeo, 3);
54121diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54122index bad485a..479bd32 100644
54123--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54124+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54125@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54126 xfs_fsop_geom_t fsgeo;
54127 int error;
54128
54129+ memset(&fsgeo, 0, sizeof(fsgeo));
54130 error = xfs_fs_geometry(mp, &fsgeo, 3);
54131 if (error)
54132 return -error;
54133diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54134index 1f3b4b8..6102f6d 100644
54135--- a/fs/xfs/linux-2.6/xfs_iops.c
54136+++ b/fs/xfs/linux-2.6/xfs_iops.c
54137@@ -468,7 +468,7 @@ xfs_vn_put_link(
54138 struct nameidata *nd,
54139 void *p)
54140 {
54141- char *s = nd_get_link(nd);
54142+ const char *s = nd_get_link(nd);
54143
54144 if (!IS_ERR(s))
54145 kfree(s);
54146diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54147index 8971fb0..5fc1eb2 100644
54148--- a/fs/xfs/xfs_bmap.c
54149+++ b/fs/xfs/xfs_bmap.c
54150@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54151 int nmap,
54152 int ret_nmap);
54153 #else
54154-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54155+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54156 #endif /* DEBUG */
54157
54158 #if defined(XFS_RW_TRACE)
54159diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54160index e89734e..5e84d8d 100644
54161--- a/fs/xfs/xfs_dir2_sf.c
54162+++ b/fs/xfs/xfs_dir2_sf.c
54163@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54164 }
54165
54166 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54167- if (filldir(dirent, sfep->name, sfep->namelen,
54168+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54169+ char name[sfep->namelen];
54170+ memcpy(name, sfep->name, sfep->namelen);
54171+ if (filldir(dirent, name, sfep->namelen,
54172+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
54173+ *offset = off & 0x7fffffff;
54174+ return 0;
54175+ }
54176+ } else if (filldir(dirent, sfep->name, sfep->namelen,
54177 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54178 *offset = off & 0x7fffffff;
54179 return 0;
54180diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54181index 8f32f50..859e8a3 100644
54182--- a/fs/xfs/xfs_vnodeops.c
54183+++ b/fs/xfs/xfs_vnodeops.c
54184@@ -564,13 +564,17 @@ xfs_readlink(
54185
54186 xfs_ilock(ip, XFS_ILOCK_SHARED);
54187
54188- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
54189- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
54190-
54191 pathlen = ip->i_d.di_size;
54192 if (!pathlen)
54193 goto out;
54194
54195+ if (pathlen > MAXPATHLEN) {
54196+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
54197+ __func__, (unsigned long long)ip->i_ino, pathlen);
54198+ ASSERT(0);
54199+ return XFS_ERROR(EFSCORRUPTED);
54200+ }
54201+
54202 if (ip->i_df.if_flags & XFS_IFINLINE) {
54203 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
54204 link[pathlen] = '\0';
54205diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
54206new file mode 100644
54207index 0000000..f27a8e8
54208--- /dev/null
54209+++ b/grsecurity/Kconfig
54210@@ -0,0 +1,1036 @@
54211+#
54212+# grecurity configuration
54213+#
54214+
54215+menu "Grsecurity"
54216+
54217+config GRKERNSEC
54218+ bool "Grsecurity"
54219+ select CRYPTO
54220+ select CRYPTO_SHA256
54221+ help
54222+ If you say Y here, you will be able to configure many features
54223+ that will enhance the security of your system. It is highly
54224+ recommended that you say Y here and read through the help
54225+ for each option so that you fully understand the features and
54226+ can evaluate their usefulness for your machine.
54227+
54228+choice
54229+ prompt "Security Level"
54230+ depends on GRKERNSEC
54231+ default GRKERNSEC_CUSTOM
54232+
54233+config GRKERNSEC_LOW
54234+ bool "Low"
54235+ select GRKERNSEC_LINK
54236+ select GRKERNSEC_FIFO
54237+ select GRKERNSEC_RANDNET
54238+ select GRKERNSEC_DMESG
54239+ select GRKERNSEC_CHROOT
54240+ select GRKERNSEC_CHROOT_CHDIR
54241+
54242+ help
54243+ If you choose this option, several of the grsecurity options will
54244+ be enabled that will give you greater protection against a number
54245+ of attacks, while assuring that none of your software will have any
54246+ conflicts with the additional security measures. If you run a lot
54247+ of unusual software, or you are having problems with the higher
54248+ security levels, you should say Y here. With this option, the
54249+ following features are enabled:
54250+
54251+ - Linking restrictions
54252+ - FIFO restrictions
54253+ - Restricted dmesg
54254+ - Enforced chdir("/") on chroot
54255+ - Runtime module disabling
54256+
54257+config GRKERNSEC_MEDIUM
54258+ bool "Medium"
54259+ select PAX
54260+ select PAX_EI_PAX
54261+ select PAX_PT_PAX_FLAGS
54262+ select PAX_HAVE_ACL_FLAGS
54263+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54264+ select GRKERNSEC_CHROOT
54265+ select GRKERNSEC_CHROOT_SYSCTL
54266+ select GRKERNSEC_LINK
54267+ select GRKERNSEC_FIFO
54268+ select GRKERNSEC_DMESG
54269+ select GRKERNSEC_RANDNET
54270+ select GRKERNSEC_FORKFAIL
54271+ select GRKERNSEC_TIME
54272+ select GRKERNSEC_SIGNAL
54273+ select GRKERNSEC_CHROOT
54274+ select GRKERNSEC_CHROOT_UNIX
54275+ select GRKERNSEC_CHROOT_MOUNT
54276+ select GRKERNSEC_CHROOT_PIVOT
54277+ select GRKERNSEC_CHROOT_DOUBLE
54278+ select GRKERNSEC_CHROOT_CHDIR
54279+ select GRKERNSEC_CHROOT_MKNOD
54280+ select GRKERNSEC_PROC
54281+ select GRKERNSEC_PROC_USERGROUP
54282+ select PAX_RANDUSTACK
54283+ select PAX_ASLR
54284+ select PAX_RANDMMAP
54285+ select PAX_REFCOUNT if (X86 || SPARC64)
54286+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54287+
54288+ help
54289+ If you say Y here, several features in addition to those included
54290+ in the low additional security level will be enabled. These
54291+ features provide even more security to your system, though in rare
54292+ cases they may be incompatible with very old or poorly written
54293+ software. If you enable this option, make sure that your auth
54294+ service (identd) is running as gid 1001. With this option,
54295+ the following features (in addition to those provided in the
54296+ low additional security level) will be enabled:
54297+
54298+ - Failed fork logging
54299+ - Time change logging
54300+ - Signal logging
54301+ - Deny mounts in chroot
54302+ - Deny double chrooting
54303+ - Deny sysctl writes in chroot
54304+ - Deny mknod in chroot
54305+ - Deny access to abstract AF_UNIX sockets out of chroot
54306+ - Deny pivot_root in chroot
54307+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
54308+ - /proc restrictions with special GID set to 10 (usually wheel)
54309+ - Address Space Layout Randomization (ASLR)
54310+ - Prevent exploitation of most refcount overflows
54311+ - Bounds checking of copying between the kernel and userland
54312+
54313+config GRKERNSEC_HIGH
54314+ bool "High"
54315+ select GRKERNSEC_LINK
54316+ select GRKERNSEC_FIFO
54317+ select GRKERNSEC_DMESG
54318+ select GRKERNSEC_FORKFAIL
54319+ select GRKERNSEC_TIME
54320+ select GRKERNSEC_SIGNAL
54321+ select GRKERNSEC_CHROOT
54322+ select GRKERNSEC_CHROOT_SHMAT
54323+ select GRKERNSEC_CHROOT_UNIX
54324+ select GRKERNSEC_CHROOT_MOUNT
54325+ select GRKERNSEC_CHROOT_FCHDIR
54326+ select GRKERNSEC_CHROOT_PIVOT
54327+ select GRKERNSEC_CHROOT_DOUBLE
54328+ select GRKERNSEC_CHROOT_CHDIR
54329+ select GRKERNSEC_CHROOT_MKNOD
54330+ select GRKERNSEC_CHROOT_CAPS
54331+ select GRKERNSEC_CHROOT_SYSCTL
54332+ select GRKERNSEC_CHROOT_FINDTASK
54333+ select GRKERNSEC_SYSFS_RESTRICT
54334+ select GRKERNSEC_PROC
54335+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54336+ select GRKERNSEC_HIDESYM
54337+ select GRKERNSEC_BRUTE
54338+ select GRKERNSEC_PROC_USERGROUP
54339+ select GRKERNSEC_KMEM
54340+ select GRKERNSEC_RESLOG
54341+ select GRKERNSEC_RANDNET
54342+ select GRKERNSEC_PROC_ADD
54343+ select GRKERNSEC_CHROOT_CHMOD
54344+ select GRKERNSEC_CHROOT_NICE
54345+ select GRKERNSEC_AUDIT_MOUNT
54346+ select GRKERNSEC_MODHARDEN if (MODULES)
54347+ select GRKERNSEC_HARDEN_PTRACE
54348+ select GRKERNSEC_VM86 if (X86_32)
54349+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54350+ select PAX
54351+ select PAX_RANDUSTACK
54352+ select PAX_ASLR
54353+ select PAX_RANDMMAP
54354+ select PAX_NOEXEC
54355+ select PAX_MPROTECT
54356+ select PAX_EI_PAX
54357+ select PAX_PT_PAX_FLAGS
54358+ select PAX_HAVE_ACL_FLAGS
54359+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
54360+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
54361+ select PAX_RANDKSTACK if (X86_TSC && X86)
54362+ select PAX_SEGMEXEC if (X86_32)
54363+ select PAX_PAGEEXEC
54364+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
54365+ select PAX_EMUTRAMP if (PARISC)
54366+ select PAX_EMUSIGRT if (PARISC)
54367+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
54368+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
54369+ select PAX_REFCOUNT if (X86 || SPARC64)
54370+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54371+ help
54372+ If you say Y here, many of the features of grsecurity will be
54373+ enabled, which will protect you against many kinds of attacks
54374+ against your system. The heightened security comes at a cost
54375+ of an increased chance of incompatibilities with rare software
54376+ on your machine. Since this security level enables PaX, you should
54377+ view <http://pax.grsecurity.net> and read about the PaX
54378+ project. While you are there, download chpax and run it on
54379+ binaries that cause problems with PaX. Also remember that
54380+ since the /proc restrictions are enabled, you must run your
54381+ identd as gid 1001. This security level enables the following
54382+ features in addition to those listed in the low and medium
54383+ security levels:
54384+
54385+ - Additional /proc restrictions
54386+ - Chmod restrictions in chroot
54387+ - No signals, ptrace, or viewing of processes outside of chroot
54388+ - Capability restrictions in chroot
54389+ - Deny fchdir out of chroot
54390+ - Priority restrictions in chroot
54391+ - Segmentation-based implementation of PaX
54392+ - Mprotect restrictions
54393+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
54394+ - Kernel stack randomization
54395+ - Mount/unmount/remount logging
54396+ - Kernel symbol hiding
54397+ - Hardening of module auto-loading
54398+ - Ptrace restrictions
54399+ - Restricted vm86 mode
54400+ - Restricted sysfs/debugfs
54401+ - Active kernel exploit response
54402+
54403+config GRKERNSEC_CUSTOM
54404+ bool "Custom"
54405+ help
54406+ If you say Y here, you will be able to configure every grsecurity
54407+ option, which allows you to enable many more features that aren't
54408+ covered in the basic security levels. These additional features
54409+ include TPE, socket restrictions, and the sysctl system for
54410+ grsecurity. It is advised that you read through the help for
54411+ each option to determine its usefulness in your situation.
54412+
54413+endchoice
54414+
54415+menu "Address Space Protection"
54416+depends on GRKERNSEC
54417+
54418+config GRKERNSEC_KMEM
54419+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
54420+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
54421+ help
54422+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
54423+ be written to or read from to modify or leak the contents of the running
54424+ kernel. /dev/port will also not be allowed to be opened. If you have module
54425+ support disabled, enabling this will close up four ways that are
54426+ currently used to insert malicious code into the running kernel.
54427+ Even with all these features enabled, we still highly recommend that
54428+ you use the RBAC system, as it is still possible for an attacker to
54429+ modify the running kernel through privileged I/O granted by ioperm/iopl.
54430+ If you are not using XFree86, you may be able to stop this additional
54431+ case by enabling the 'Disable privileged I/O' option. Though nothing
54432+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
54433+ but only to video memory, which is the only writing we allow in this
54434+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
54435+ not be allowed to mprotect it with PROT_WRITE later.
54436+ It is highly recommended that you say Y here if you meet all the
54437+ conditions above.
54438+
54439+config GRKERNSEC_VM86
54440+ bool "Restrict VM86 mode"
54441+ depends on X86_32
54442+
54443+ help
54444+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
54445+ make use of a special execution mode on 32bit x86 processors called
54446+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
54447+ video cards and will still work with this option enabled. The purpose
54448+ of the option is to prevent exploitation of emulation errors in
54449+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
54450+ Nearly all users should be able to enable this option.
54451+
54452+config GRKERNSEC_IO
54453+ bool "Disable privileged I/O"
54454+ depends on X86
54455+ select RTC_CLASS
54456+ select RTC_INTF_DEV
54457+ select RTC_DRV_CMOS
54458+
54459+ help
54460+ If you say Y here, all ioperm and iopl calls will return an error.
54461+ Ioperm and iopl can be used to modify the running kernel.
54462+ Unfortunately, some programs need this access to operate properly,
54463+ the most notable of which are XFree86 and hwclock. hwclock can be
54464+ remedied by having RTC support in the kernel, so real-time
54465+ clock support is enabled if this option is enabled, to ensure
54466+ that hwclock operates correctly. XFree86 still will not
54467+ operate correctly with this option enabled, so DO NOT CHOOSE Y
54468+ IF YOU USE XFree86. If you use XFree86 and you still want to
54469+ protect your kernel against modification, use the RBAC system.
54470+
54471+config GRKERNSEC_PROC_MEMMAP
54472+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
54473+ default y if (PAX_NOEXEC || PAX_ASLR)
54474+ depends on PAX_NOEXEC || PAX_ASLR
54475+ help
54476+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
54477+ give no information about the addresses of its mappings if
54478+ PaX features that rely on random addresses are enabled on the task.
54479+ If you use PaX it is greatly recommended that you say Y here as it
54480+ closes up a hole that makes the full ASLR useless for suid
54481+ binaries.
54482+
54483+config GRKERNSEC_BRUTE
54484+ bool "Deter exploit bruteforcing"
54485+ help
54486+ If you say Y here, attempts to bruteforce exploits against forking
54487+ daemons such as apache or sshd, as well as against suid/sgid binaries
54488+ will be deterred. When a child of a forking daemon is killed by PaX
54489+ or crashes due to an illegal instruction or other suspicious signal,
54490+ the parent process will be delayed 30 seconds upon every subsequent
54491+ fork until the administrator is able to assess the situation and
54492+ restart the daemon.
54493+ In the suid/sgid case, the attempt is logged, the user has all their
54494+ processes terminated, and they are prevented from executing any further
54495+ processes for 15 minutes.
54496+ It is recommended that you also enable signal logging in the auditing
54497+ section so that logs are generated when a process triggers a suspicious
54498+ signal.
54499+ If the sysctl option is enabled, a sysctl option with name
54500+ "deter_bruteforce" is created.
54501+
54502+config GRKERNSEC_MODHARDEN
54503+ bool "Harden module auto-loading"
54504+ depends on MODULES
54505+ help
54506+ If you say Y here, module auto-loading in response to use of some
54507+ feature implemented by an unloaded module will be restricted to
54508+ root users. Enabling this option helps defend against attacks
54509+ by unprivileged users who abuse the auto-loading behavior to
54510+ cause a vulnerable module to load that is then exploited.
54511+
54512+ If this option prevents a legitimate use of auto-loading for a
54513+ non-root user, the administrator can execute modprobe manually
54514+ with the exact name of the module mentioned in the alert log.
54515+ Alternatively, the administrator can add the module to the list
54516+ of modules loaded at boot by modifying init scripts.
54517+
54518+ Modification of init scripts will most likely be needed on
54519+ Ubuntu servers with encrypted home directory support enabled,
54520+ as the first non-root user logging in will cause the ecb(aes),
54521+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
54522+
54523+config GRKERNSEC_HIDESYM
54524+ bool "Hide kernel symbols"
54525+ help
54526+ If you say Y here, getting information on loaded modules, and
54527+ displaying all kernel symbols through a syscall will be restricted
54528+ to users with CAP_SYS_MODULE. For software compatibility reasons,
54529+ /proc/kallsyms will be restricted to the root user. The RBAC
54530+ system can hide that entry even from root.
54531+
54532+ This option also prevents leaking of kernel addresses through
54533+ several /proc entries.
54534+
54535+ Note that this option is only effective provided the following
54536+ conditions are met:
54537+ 1) The kernel using grsecurity is not precompiled by some distribution
54538+ 2) You have also enabled GRKERNSEC_DMESG
54539+ 3) You are using the RBAC system and hiding other files such as your
54540+ kernel image and System.map. Alternatively, enabling this option
54541+ causes the permissions on /boot, /lib/modules, and the kernel
54542+ source directory to change at compile time to prevent
54543+ reading by non-root users.
54544+ If the above conditions are met, this option will aid in providing a
54545+ useful protection against local kernel exploitation of overflows
54546+ and arbitrary read/write vulnerabilities.
54547+
54548+config GRKERNSEC_KERN_LOCKOUT
54549+ bool "Active kernel exploit response"
54550+ depends on X86 || ARM || PPC || SPARC
54551+ help
54552+ If you say Y here, when a PaX alert is triggered due to suspicious
54553+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
54554+ or an OOPs occurs due to bad memory accesses, instead of just
54555+ terminating the offending process (and potentially allowing
54556+ a subsequent exploit from the same user), we will take one of two
54557+ actions:
54558+ If the user was root, we will panic the system
54559+ If the user was non-root, we will log the attempt, terminate
54560+ all processes owned by the user, then prevent them from creating
54561+ any new processes until the system is restarted
54562+ This deters repeated kernel exploitation/bruteforcing attempts
54563+ and is useful for later forensics.
54564+
54565+endmenu
54566+menu "Role Based Access Control Options"
54567+depends on GRKERNSEC
54568+
54569+config GRKERNSEC_RBAC_DEBUG
54570+ bool
54571+
54572+config GRKERNSEC_NO_RBAC
54573+ bool "Disable RBAC system"
54574+ help
54575+ If you say Y here, the /dev/grsec device will be removed from the kernel,
54576+ preventing the RBAC system from being enabled. You should only say Y
54577+ here if you have no intention of using the RBAC system, so as to prevent
54578+ an attacker with root access from misusing the RBAC system to hide files
54579+ and processes when loadable module support and /dev/[k]mem have been
54580+ locked down.
54581+
54582+config GRKERNSEC_ACL_HIDEKERN
54583+ bool "Hide kernel processes"
54584+ help
54585+ If you say Y here, all kernel threads will be hidden to all
54586+ processes but those whose subject has the "view hidden processes"
54587+ flag.
54588+
54589+config GRKERNSEC_ACL_MAXTRIES
54590+ int "Maximum tries before password lockout"
54591+ default 3
54592+ help
54593+ This option enforces the maximum number of times a user can attempt
54594+ to authorize themselves with the grsecurity RBAC system before being
54595+ denied the ability to attempt authorization again for a specified time.
54596+ The lower the number, the harder it will be to brute-force a password.
54597+
54598+config GRKERNSEC_ACL_TIMEOUT
54599+ int "Time to wait after max password tries, in seconds"
54600+ default 30
54601+ help
54602+ This option specifies the time the user must wait after attempting to
54603+ authorize to the RBAC system with the maximum number of invalid
54604+ passwords. The higher the number, the harder it will be to brute-force
54605+ a password.
54606+
54607+endmenu
54608+menu "Filesystem Protections"
54609+depends on GRKERNSEC
54610+
54611+config GRKERNSEC_PROC
54612+ bool "Proc restrictions"
54613+ help
54614+ If you say Y here, the permissions of the /proc filesystem
54615+ will be altered to enhance system security and privacy. You MUST
54616+ choose either a user only restriction or a user and group restriction.
54617+ Depending upon the option you choose, you can either restrict users to
54618+ see only the processes they themselves run, or choose a group that can
54619+ view all processes and files normally restricted to root if you choose
54620+ the "restrict to user only" option. NOTE: If you're running identd as
54621+ a non-root user, you will have to run it as the group you specify here.
54622+
54623+config GRKERNSEC_PROC_USER
54624+ bool "Restrict /proc to user only"
54625+ depends on GRKERNSEC_PROC
54626+ help
54627+ If you say Y here, non-root users will only be able to view their own
54628+ processes, and restricts them from viewing network-related information,
54629+ and viewing kernel symbol and module information.
54630+
54631+config GRKERNSEC_PROC_USERGROUP
54632+ bool "Allow special group"
54633+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
54634+ help
54635+ If you say Y here, you will be able to select a group that will be
54636+ able to view all processes and network-related information. If you've
54637+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
54638+ remain hidden. This option is useful if you want to run identd as
54639+ a non-root user.
54640+
54641+config GRKERNSEC_PROC_GID
54642+ int "GID for special group"
54643+ depends on GRKERNSEC_PROC_USERGROUP
54644+ default 1001
54645+
54646+config GRKERNSEC_PROC_ADD
54647+ bool "Additional restrictions"
54648+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
54649+ help
54650+ If you say Y here, additional restrictions will be placed on
54651+ /proc that keep normal users from viewing device information and
54652+ slabinfo information that could be useful for exploits.
54653+
54654+config GRKERNSEC_LINK
54655+ bool "Linking restrictions"
54656+ help
54657+ If you say Y here, /tmp race exploits will be prevented, since users
54658+ will no longer be able to follow symlinks owned by other users in
54659+ world-writable +t directories (e.g. /tmp), unless the owner of the
54660+ symlink is the owner of the directory. Users will also not be
54661+ able to hardlink to files they do not own. If the sysctl option is
54662+ enabled, a sysctl option with name "linking_restrictions" is created.
54663+
54664+config GRKERNSEC_FIFO
54665+ bool "FIFO restrictions"
54666+ help
54667+ If you say Y here, users will not be able to write to FIFOs they don't
54668+ own in world-writable +t directories (e.g. /tmp), unless the owner of
54669+ the FIFO is the same owner of the directory it's held in. If the sysctl
54670+ option is enabled, a sysctl option with name "fifo_restrictions" is
54671+ created.
54672+
54673+config GRKERNSEC_SYSFS_RESTRICT
54674+ bool "Sysfs/debugfs restriction"
54675+ depends on SYSFS
54676+ help
54677+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
54678+ any filesystem normally mounted under it (e.g. debugfs) will only
54679+ be accessible by root. These filesystems generally provide access
54680+ to hardware and debug information that isn't appropriate for unprivileged
54681+ users of the system. Sysfs and debugfs have also become a large source
54682+ of new vulnerabilities, ranging from infoleaks to local compromise.
54683+ There has been very little oversight with an eye toward security involved
54684+ in adding new exporters of information to these filesystems, so their
54685+ use is discouraged.
54686+ This option is equivalent to a chmod 0700 of the mount paths.
54687+
54688+config GRKERNSEC_ROFS
54689+ bool "Runtime read-only mount protection"
54690+ help
54691+ If you say Y here, a sysctl option with name "romount_protect" will
54692+ be created. By setting this option to 1 at runtime, filesystems
54693+ will be protected in the following ways:
54694+ * No new writable mounts will be allowed
54695+ * Existing read-only mounts won't be able to be remounted read/write
54696+ * Write operations will be denied on all block devices
54697+ This option acts independently of grsec_lock: once it is set to 1,
54698+ it cannot be turned off. Therefore, please be mindful of the resulting
54699+ behavior if this option is enabled in an init script on a read-only
54700+ filesystem. This feature is mainly intended for secure embedded systems.
54701+
54702+config GRKERNSEC_CHROOT
54703+ bool "Chroot jail restrictions"
54704+ help
54705+ If you say Y here, you will be able to choose several options that will
54706+ make breaking out of a chrooted jail much more difficult. If you
54707+ encounter no software incompatibilities with the following options, it
54708+ is recommended that you enable each one.
54709+
54710+config GRKERNSEC_CHROOT_MOUNT
54711+ bool "Deny mounts"
54712+ depends on GRKERNSEC_CHROOT
54713+ help
54714+ If you say Y here, processes inside a chroot will not be able to
54715+ mount or remount filesystems. If the sysctl option is enabled, a
54716+ sysctl option with name "chroot_deny_mount" is created.
54717+
54718+config GRKERNSEC_CHROOT_DOUBLE
54719+ bool "Deny double-chroots"
54720+ depends on GRKERNSEC_CHROOT
54721+ help
54722+ If you say Y here, processes inside a chroot will not be able to chroot
54723+ again outside the chroot. This is a widely used method of breaking
54724+ out of a chroot jail and should not be allowed. If the sysctl
54725+ option is enabled, a sysctl option with name
54726+ "chroot_deny_chroot" is created.
54727+
54728+config GRKERNSEC_CHROOT_PIVOT
54729+ bool "Deny pivot_root in chroot"
54730+ depends on GRKERNSEC_CHROOT
54731+ help
54732+ If you say Y here, processes inside a chroot will not be able to use
54733+ a function called pivot_root() that was introduced in Linux 2.3.41. It
54734+ works similarly to chroot in that it changes the root filesystem. This
54735+ function could be misused in a chrooted process to attempt to break out
54736+ of the chroot, and therefore should not be allowed. If the sysctl
54737+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
54738+ created.
54739+
54740+config GRKERNSEC_CHROOT_CHDIR
54741+ bool "Enforce chdir(\"/\") on all chroots"
54742+ depends on GRKERNSEC_CHROOT
54743+ help
54744+ If you say Y here, the current working directory of all newly-chrooted
54745+ applications will be set to the root directory of the chroot.
54746+ The man page on chroot(2) states:
54747+ Note that this call does not change the current working
54748+ directory, so that `.' can be outside the tree rooted at
54749+ `/'. In particular, the super-user can escape from a
54750+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
54751+
54752+ It is recommended that you say Y here, since it's not known to break
54753+ any software. If the sysctl option is enabled, a sysctl option with
54754+ name "chroot_enforce_chdir" is created.
54755+
54756+config GRKERNSEC_CHROOT_CHMOD
54757+ bool "Deny (f)chmod +s"
54758+ depends on GRKERNSEC_CHROOT
54759+ help
54760+ If you say Y here, processes inside a chroot will not be able to chmod
54761+ or fchmod files to make them have suid or sgid bits. This protects
54762+ against another published method of breaking a chroot. If the sysctl
54763+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
54764+ created.
54765+
54766+config GRKERNSEC_CHROOT_FCHDIR
54767+ bool "Deny fchdir out of chroot"
54768+ depends on GRKERNSEC_CHROOT
54769+ help
54770+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
54771+ to a file descriptor of the chrooting process that points to a directory
54772+ outside the filesystem will be stopped. If the sysctl option
54773+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
54774+
54775+config GRKERNSEC_CHROOT_MKNOD
54776+ bool "Deny mknod"
54777+ depends on GRKERNSEC_CHROOT
54778+ help
54779+ If you say Y here, processes inside a chroot will not be allowed to
54780+ mknod. The problem with using mknod inside a chroot is that it
54781+ would allow an attacker to create a device entry that is the same
54782+ as one on the physical root of your system, which could range from
54783+ anything from the console device to a device for your harddrive (which
54784+ they could then use to wipe the drive or steal data). It is recommended
54785+ that you say Y here, unless you run into software incompatibilities.
54786+ If the sysctl option is enabled, a sysctl option with name
54787+ "chroot_deny_mknod" is created.
54788+
54789+config GRKERNSEC_CHROOT_SHMAT
54790+ bool "Deny shmat() out of chroot"
54791+ depends on GRKERNSEC_CHROOT
54792+ help
54793+ If you say Y here, processes inside a chroot will not be able to attach
54794+ to shared memory segments that were created outside of the chroot jail.
54795+ It is recommended that you say Y here. If the sysctl option is enabled,
54796+ a sysctl option with name "chroot_deny_shmat" is created.
54797+
54798+config GRKERNSEC_CHROOT_UNIX
54799+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
54800+ depends on GRKERNSEC_CHROOT
54801+ help
54802+ If you say Y here, processes inside a chroot will not be able to
54803+ connect to abstract (meaning not belonging to a filesystem) Unix
54804+ domain sockets that were bound outside of a chroot. It is recommended
54805+ that you say Y here. If the sysctl option is enabled, a sysctl option
54806+ with name "chroot_deny_unix" is created.
54807+
54808+config GRKERNSEC_CHROOT_FINDTASK
54809+ bool "Protect outside processes"
54810+ depends on GRKERNSEC_CHROOT
54811+ help
54812+ If you say Y here, processes inside a chroot will not be able to
54813+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
54814+ getsid, or view any process outside of the chroot. If the sysctl
54815+ option is enabled, a sysctl option with name "chroot_findtask" is
54816+ created.
54817+
54818+config GRKERNSEC_CHROOT_NICE
54819+ bool "Restrict priority changes"
54820+ depends on GRKERNSEC_CHROOT
54821+ help
54822+ If you say Y here, processes inside a chroot will not be able to raise
54823+ the priority of processes in the chroot, or alter the priority of
54824+ processes outside the chroot. This provides more security than simply
54825+ removing CAP_SYS_NICE from the process' capability set. If the
54826+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
54827+ is created.
54828+
54829+config GRKERNSEC_CHROOT_SYSCTL
54830+ bool "Deny sysctl writes"
54831+ depends on GRKERNSEC_CHROOT
54832+ help
54833+ If you say Y here, an attacker in a chroot will not be able to
54834+ write to sysctl entries, either by sysctl(2) or through a /proc
54835+ interface. It is strongly recommended that you say Y here. If the
54836+ sysctl option is enabled, a sysctl option with name
54837+ "chroot_deny_sysctl" is created.
54838+
54839+config GRKERNSEC_CHROOT_CAPS
54840+ bool "Capability restrictions"
54841+ depends on GRKERNSEC_CHROOT
54842+ help
54843+ If you say Y here, the capabilities on all processes within a
54844+ chroot jail will be lowered to stop module insertion, raw i/o,
54845+ system and net admin tasks, rebooting the system, modifying immutable
54846+ files, modifying IPC owned by another, and changing the system time.
54847+ This is left an option because it can break some apps. Disable this
54848+ if your chrooted apps are having problems performing those kinds of
54849+ tasks. If the sysctl option is enabled, a sysctl option with
54850+ name "chroot_caps" is created.
54851+
54852+endmenu
54853+menu "Kernel Auditing"
54854+depends on GRKERNSEC
54855+
54856+config GRKERNSEC_AUDIT_GROUP
54857+ bool "Single group for auditing"
54858+ help
54859+ If you say Y here, the exec, chdir, and (un)mount logging features
54860+ will only operate on a group you specify. This option is recommended
54861+ if you only want to watch certain users instead of having a large
54862+ amount of logs from the entire system. If the sysctl option is enabled,
54863+ a sysctl option with name "audit_group" is created.
54864+
54865+config GRKERNSEC_AUDIT_GID
54866+ int "GID for auditing"
54867+ depends on GRKERNSEC_AUDIT_GROUP
54868+ default 1007
54869+
54870+config GRKERNSEC_EXECLOG
54871+ bool "Exec logging"
54872+ help
54873+ If you say Y here, all execve() calls will be logged (since the
54874+ other exec*() calls are frontends to execve(), all execution
54875+ will be logged). Useful for shell-servers that like to keep track
54876+ of their users. If the sysctl option is enabled, a sysctl option with
54877+ name "exec_logging" is created.
54878+ WARNING: This option when enabled will produce a LOT of logs, especially
54879+ on an active system.
54880+
54881+config GRKERNSEC_RESLOG
54882+ bool "Resource logging"
54883+ help
54884+ If you say Y here, all attempts to overstep resource limits will
54885+ be logged with the resource name, the requested size, and the current
54886+ limit. It is highly recommended that you say Y here. If the sysctl
54887+ option is enabled, a sysctl option with name "resource_logging" is
54888+ created. If the RBAC system is enabled, the sysctl value is ignored.
54889+
54890+config GRKERNSEC_CHROOT_EXECLOG
54891+ bool "Log execs within chroot"
54892+ help
54893+ If you say Y here, all executions inside a chroot jail will be logged
54894+ to syslog. This can cause a large amount of logs if certain
54895+ applications (eg. djb's daemontools) are installed on the system, and
54896+ is therefore left as an option. If the sysctl option is enabled, a
54897+ sysctl option with name "chroot_execlog" is created.
54898+
54899+config GRKERNSEC_AUDIT_PTRACE
54900+ bool "Ptrace logging"
54901+ help
54902+ If you say Y here, all attempts to attach to a process via ptrace
54903+ will be logged. If the sysctl option is enabled, a sysctl option
54904+ with name "audit_ptrace" is created.
54905+
54906+config GRKERNSEC_AUDIT_CHDIR
54907+ bool "Chdir logging"
54908+ help
54909+ If you say Y here, all chdir() calls will be logged. If the sysctl
54910+ option is enabled, a sysctl option with name "audit_chdir" is created.
54911+
54912+config GRKERNSEC_AUDIT_MOUNT
54913+ bool "(Un)Mount logging"
54914+ help
54915+ If you say Y here, all mounts and unmounts will be logged. If the
54916+ sysctl option is enabled, a sysctl option with name "audit_mount" is
54917+ created.
54918+
54919+config GRKERNSEC_SIGNAL
54920+ bool "Signal logging"
54921+ help
54922+ If you say Y here, certain important signals will be logged, such as
54923+ SIGSEGV, which will as a result inform you of when an error in a program
54924+ occurred, which in some cases could mean a possible exploit attempt.
54925+ If the sysctl option is enabled, a sysctl option with name
54926+ "signal_logging" is created.
54927+
54928+config GRKERNSEC_FORKFAIL
54929+ bool "Fork failure logging"
54930+ help
54931+ If you say Y here, all failed fork() attempts will be logged.
54932+ This could suggest a fork bomb, or someone attempting to overstep
54933+ their process limit. If the sysctl option is enabled, a sysctl option
54934+ with name "forkfail_logging" is created.
54935+
54936+config GRKERNSEC_TIME
54937+ bool "Time change logging"
54938+ help
54939+ If you say Y here, any changes of the system clock will be logged.
54940+ If the sysctl option is enabled, a sysctl option with name
54941+ "timechange_logging" is created.
54942+
54943+config GRKERNSEC_PROC_IPADDR
54944+ bool "/proc/<pid>/ipaddr support"
54945+ help
54946+ If you say Y here, a new entry will be added to each /proc/<pid>
54947+ directory that contains the IP address of the person using the task.
54948+ The IP is carried across local TCP and AF_UNIX stream sockets.
54949+ This information can be useful for IDS/IPSes to perform remote response
54950+ to a local attack. The entry is readable by only the owner of the
54951+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
54952+ the RBAC system), and thus does not create privacy concerns.
54953+
54954+config GRKERNSEC_RWXMAP_LOG
54955+ bool 'Denied RWX mmap/mprotect logging'
54956+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
54957+ help
54958+ If you say Y here, calls to mmap() and mprotect() with explicit
54959+ usage of PROT_WRITE and PROT_EXEC together will be logged when
54960+ denied by the PAX_MPROTECT feature. If the sysctl option is
54961+ enabled, a sysctl option with name "rwxmap_logging" is created.
54962+
54963+config GRKERNSEC_AUDIT_TEXTREL
54964+ bool 'ELF text relocations logging (READ HELP)'
54965+ depends on PAX_MPROTECT
54966+ help
54967+ If you say Y here, text relocations will be logged with the filename
54968+ of the offending library or binary. The purpose of the feature is
54969+ to help Linux distribution developers get rid of libraries and
54970+ binaries that need text relocations which hinder the future progress
54971+ of PaX. Only Linux distribution developers should say Y here, and
54972+ never on a production machine, as this option creates an information
54973+ leak that could aid an attacker in defeating the randomization of
54974+ a single memory region. If the sysctl option is enabled, a sysctl
54975+ option with name "audit_textrel" is created.
54976+
54977+endmenu
54978+
54979+menu "Executable Protections"
54980+depends on GRKERNSEC
54981+
54982+config GRKERNSEC_DMESG
54983+ bool "Dmesg(8) restriction"
54984+ help
54985+ If you say Y here, non-root users will not be able to use dmesg(8)
54986+ to view up to the last 4kb of messages in the kernel's log buffer.
54987+ The kernel's log buffer often contains kernel addresses and other
54988+ identifying information useful to an attacker in fingerprinting a
54989+ system for a targeted exploit.
54990+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
54991+ created.
54992+
54993+config GRKERNSEC_HARDEN_PTRACE
54994+ bool "Deter ptrace-based process snooping"
54995+ help
54996+ If you say Y here, TTY sniffers and other malicious monitoring
54997+ programs implemented through ptrace will be defeated. If you
54998+ have been using the RBAC system, this option has already been
54999+ enabled for several years for all users, with the ability to make
55000+ fine-grained exceptions.
55001+
55002+ This option only affects the ability of non-root users to ptrace
55003+ processes that are not a descendent of the ptracing process.
55004+ This means that strace ./binary and gdb ./binary will still work,
55005+ but attaching to arbitrary processes will not. If the sysctl
55006+ option is enabled, a sysctl option with name "harden_ptrace" is
55007+ created.
55008+
55009+config GRKERNSEC_TPE
55010+ bool "Trusted Path Execution (TPE)"
55011+ help
55012+ If you say Y here, you will be able to choose a gid to add to the
55013+ supplementary groups of users you want to mark as "untrusted."
55014+ These users will not be able to execute any files that are not in
55015+ root-owned directories writable only by root. If the sysctl option
55016+ is enabled, a sysctl option with name "tpe" is created.
55017+
55018+config GRKERNSEC_TPE_ALL
55019+ bool "Partially restrict all non-root users"
55020+ depends on GRKERNSEC_TPE
55021+ help
55022+ If you say Y here, all non-root users will be covered under
55023+ a weaker TPE restriction. This is separate from, and in addition to,
55024+ the main TPE options that you have selected elsewhere. Thus, if a
55025+ "trusted" GID is chosen, this restriction applies to even that GID.
55026+ Under this restriction, all non-root users will only be allowed to
55027+ execute files in directories they own that are not group or
55028+ world-writable, or in directories owned by root and writable only by
55029+ root. If the sysctl option is enabled, a sysctl option with name
55030+ "tpe_restrict_all" is created.
55031+
55032+config GRKERNSEC_TPE_INVERT
55033+ bool "Invert GID option"
55034+ depends on GRKERNSEC_TPE
55035+ help
55036+ If you say Y here, the group you specify in the TPE configuration will
55037+ decide what group TPE restrictions will be *disabled* for. This
55038+ option is useful if you want TPE restrictions to be applied to most
55039+ users on the system. If the sysctl option is enabled, a sysctl option
55040+ with name "tpe_invert" is created. Unlike other sysctl options, this
55041+ entry will default to on for backward-compatibility.
55042+
55043+config GRKERNSEC_TPE_GID
55044+ int "GID for untrusted users"
55045+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55046+ default 1005
55047+ help
55048+ Setting this GID determines what group TPE restrictions will be
55049+ *enabled* for. If the sysctl option is enabled, a sysctl option
55050+ with name "tpe_gid" is created.
55051+
55052+config GRKERNSEC_TPE_GID
55053+ int "GID for trusted users"
55054+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55055+ default 1005
55056+ help
55057+ Setting this GID determines what group TPE restrictions will be
55058+ *disabled* for. If the sysctl option is enabled, a sysctl option
55059+ with name "tpe_gid" is created.
55060+
55061+endmenu
55062+menu "Network Protections"
55063+depends on GRKERNSEC
55064+
55065+config GRKERNSEC_RANDNET
55066+ bool "Larger entropy pools"
55067+ help
55068+ If you say Y here, the entropy pools used for many features of Linux
55069+ and grsecurity will be doubled in size. Since several grsecurity
55070+ features use additional randomness, it is recommended that you say Y
55071+ here. Saying Y here has a similar effect as modifying
55072+ /proc/sys/kernel/random/poolsize.
55073+
55074+config GRKERNSEC_BLACKHOLE
55075+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55076+ depends on NET
55077+ help
55078+ If you say Y here, neither TCP resets nor ICMP
55079+ destination-unreachable packets will be sent in response to packets
55080+ sent to ports for which no associated listening process exists.
55081+ This feature supports both IPV4 and IPV6 and exempts the
55082+ loopback interface from blackholing. Enabling this feature
55083+ makes a host more resilient to DoS attacks and reduces network
55084+ visibility against scanners.
55085+
55086+ The blackhole feature as-implemented is equivalent to the FreeBSD
55087+ blackhole feature, as it prevents RST responses to all packets, not
55088+ just SYNs. Under most application behavior this causes no
55089+ problems, but applications (like haproxy) may not close certain
55090+ connections in a way that cleanly terminates them on the remote
55091+ end, leaving the remote host in LAST_ACK state. Because of this
55092+ side-effect and to prevent intentional LAST_ACK DoSes, this
55093+ feature also adds automatic mitigation against such attacks.
55094+ The mitigation drastically reduces the amount of time a socket
55095+ can spend in LAST_ACK state. If you're using haproxy and not
55096+ all servers it connects to have this option enabled, consider
55097+ disabling this feature on the haproxy host.
55098+
55099+ If the sysctl option is enabled, two sysctl options with names
55100+ "ip_blackhole" and "lastack_retries" will be created.
55101+ While "ip_blackhole" takes the standard zero/non-zero on/off
55102+ toggle, "lastack_retries" uses the same kinds of values as
55103+ "tcp_retries1" and "tcp_retries2". The default value of 4
55104+ prevents a socket from lasting more than 45 seconds in LAST_ACK
55105+ state.
55106+
55107+config GRKERNSEC_SOCKET
55108+ bool "Socket restrictions"
55109+ depends on NET
55110+ help
55111+ If you say Y here, you will be able to choose from several options.
55112+ If you assign a GID on your system and add it to the supplementary
55113+ groups of users you want to restrict socket access to, this patch
55114+ will perform up to three things, based on the option(s) you choose.
55115+
55116+config GRKERNSEC_SOCKET_ALL
55117+ bool "Deny any sockets to group"
55118+ depends on GRKERNSEC_SOCKET
55119+ help
55120+ If you say Y here, you will be able to choose a GID whose users will
55121+ be unable to connect to other hosts from your machine or run server
55122+ applications from your machine. If the sysctl option is enabled, a
55123+ sysctl option with name "socket_all" is created.
55124+
55125+config GRKERNSEC_SOCKET_ALL_GID
55126+ int "GID to deny all sockets for"
55127+ depends on GRKERNSEC_SOCKET_ALL
55128+ default 1004
55129+ help
55130+ Here you can choose the GID to disable socket access for. Remember to
55131+ add the users you want socket access disabled for to the GID
55132+ specified here. If the sysctl option is enabled, a sysctl option
55133+ with name "socket_all_gid" is created.
55134+
55135+config GRKERNSEC_SOCKET_CLIENT
55136+ bool "Deny client sockets to group"
55137+ depends on GRKERNSEC_SOCKET
55138+ help
55139+ If you say Y here, you will be able to choose a GID whose users will
55140+ be unable to connect to other hosts from your machine, but will be
55141+ able to run servers. If this option is enabled, all users in the group
55142+ you specify will have to use passive mode when initiating ftp transfers
55143+ from the shell on your machine. If the sysctl option is enabled, a
55144+ sysctl option with name "socket_client" is created.
55145+
55146+config GRKERNSEC_SOCKET_CLIENT_GID
55147+ int "GID to deny client sockets for"
55148+ depends on GRKERNSEC_SOCKET_CLIENT
55149+ default 1003
55150+ help
55151+ Here you can choose the GID to disable client socket access for.
55152+ Remember to add the users you want client socket access disabled for to
55153+ the GID specified here. If the sysctl option is enabled, a sysctl
55154+ option with name "socket_client_gid" is created.
55155+
55156+config GRKERNSEC_SOCKET_SERVER
55157+ bool "Deny server sockets to group"
55158+ depends on GRKERNSEC_SOCKET
55159+ help
55160+ If you say Y here, you will be able to choose a GID whose users will
55161+ be unable to run server applications from your machine. If the sysctl
55162+ option is enabled, a sysctl option with name "socket_server" is created.
55163+
55164+config GRKERNSEC_SOCKET_SERVER_GID
55165+ int "GID to deny server sockets for"
55166+ depends on GRKERNSEC_SOCKET_SERVER
55167+ default 1002
55168+ help
55169+ Here you can choose the GID to disable server socket access for.
55170+ Remember to add the users you want server socket access disabled for to
55171+ the GID specified here. If the sysctl option is enabled, a sysctl
55172+ option with name "socket_server_gid" is created.
55173+
55174+endmenu
55175+menu "Sysctl support"
55176+depends on GRKERNSEC && SYSCTL
55177+
55178+config GRKERNSEC_SYSCTL
55179+ bool "Sysctl support"
55180+ help
55181+ If you say Y here, you will be able to change the options that
55182+ grsecurity runs with at bootup, without having to recompile your
55183+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55184+ to enable (1) or disable (0) various features. All the sysctl entries
55185+ are mutable until the "grsec_lock" entry is set to a non-zero value.
55186+ All features enabled in the kernel configuration are disabled at boot
55187+ if you do not say Y to the "Turn on features by default" option.
55188+ All options should be set at startup, and the grsec_lock entry should
55189+ be set to a non-zero value after all the options are set.
55190+ *THIS IS EXTREMELY IMPORTANT*
55191+
55192+config GRKERNSEC_SYSCTL_DISTRO
55193+ bool "Extra sysctl support for distro makers (READ HELP)"
55194+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55195+ help
55196+ If you say Y here, additional sysctl options will be created
55197+ for features that affect processes running as root. Therefore,
55198+ it is critical when using this option that the grsec_lock entry be
55199+ enabled after boot. Only distros with prebuilt kernel packages
55200+ with this option enabled that can ensure grsec_lock is enabled
55201+ after boot should use this option.
55202+ *Failure to set grsec_lock after boot makes all grsec features
55203+ this option covers useless*
55204+
55205+ Currently this option creates the following sysctl entries:
55206+ "Disable Privileged I/O": "disable_priv_io"
55207+
55208+config GRKERNSEC_SYSCTL_ON
55209+ bool "Turn on features by default"
55210+ depends on GRKERNSEC_SYSCTL
55211+ help
55212+ If you say Y here, instead of having all features enabled in the
55213+ kernel configuration disabled at boot time, the features will be
55214+ enabled at boot time. It is recommended you say Y here unless
55215+ there is some reason you would want all sysctl-tunable features to
55216+ be disabled by default. As mentioned elsewhere, it is important
55217+ to enable the grsec_lock entry once you have finished modifying
55218+ the sysctl entries.
55219+
55220+endmenu
55221+menu "Logging Options"
55222+depends on GRKERNSEC
55223+
55224+config GRKERNSEC_FLOODTIME
55225+ int "Seconds in between log messages (minimum)"
55226+ default 10
55227+ help
55228+ This option allows you to enforce the number of seconds between
55229+ grsecurity log messages. The default should be suitable for most
55230+ people, however, if you choose to change it, choose a value small enough
55231+ to allow informative logs to be produced, but large enough to
55232+ prevent flooding.
55233+
55234+config GRKERNSEC_FLOODBURST
55235+ int "Number of messages in a burst (maximum)"
55236+ default 6
55237+ help
55238+ This option allows you to choose the maximum number of messages allowed
55239+ within the flood time interval you chose in a separate option. The
55240+ default should be suitable for most people, however if you find that
55241+ many of your logs are being interpreted as flooding, you may want to
55242+ raise this value.
55243+
55244+endmenu
55245+
55246+endmenu
55247diff --git a/grsecurity/Makefile b/grsecurity/Makefile
55248new file mode 100644
55249index 0000000..be9ae3a
55250--- /dev/null
55251+++ b/grsecurity/Makefile
55252@@ -0,0 +1,36 @@
55253+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
55254+# during 2001-2009 it has been completely redesigned by Brad Spengler
55255+# into an RBAC system
55256+#
55257+# All code in this directory and various hooks inserted throughout the kernel
55258+# are copyright Brad Spengler - Open Source Security, Inc., and released
55259+# under the GPL v2 or higher
55260+
55261+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
55262+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
55263+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
55264+
55265+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
55266+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
55267+ gracl_learn.o grsec_log.o
55268+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
55269+
55270+ifdef CONFIG_NET
55271+obj-y += grsec_sock.o
55272+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
55273+endif
55274+
55275+ifndef CONFIG_GRKERNSEC
55276+obj-y += grsec_disabled.o
55277+endif
55278+
55279+ifdef CONFIG_GRKERNSEC_HIDESYM
55280+extra-y := grsec_hidesym.o
55281+$(obj)/grsec_hidesym.o:
55282+ @-chmod -f 500 /boot
55283+ @-chmod -f 500 /lib/modules
55284+ @-chmod -f 500 /lib64/modules
55285+ @-chmod -f 500 /lib32/modules
55286+ @-chmod -f 700 .
55287+ @echo ' grsec: protected kernel image paths'
55288+endif
55289diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
55290new file mode 100644
55291index 0000000..6bd68d6
55292--- /dev/null
55293+++ b/grsecurity/gracl.c
55294@@ -0,0 +1,4141 @@
55295+#include <linux/kernel.h>
55296+#include <linux/module.h>
55297+#include <linux/sched.h>
55298+#include <linux/mm.h>
55299+#include <linux/file.h>
55300+#include <linux/fs.h>
55301+#include <linux/namei.h>
55302+#include <linux/mount.h>
55303+#include <linux/tty.h>
55304+#include <linux/proc_fs.h>
55305+#include <linux/smp_lock.h>
55306+#include <linux/slab.h>
55307+#include <linux/vmalloc.h>
55308+#include <linux/types.h>
55309+#include <linux/sysctl.h>
55310+#include <linux/netdevice.h>
55311+#include <linux/ptrace.h>
55312+#include <linux/gracl.h>
55313+#include <linux/gralloc.h>
55314+#include <linux/grsecurity.h>
55315+#include <linux/grinternal.h>
55316+#include <linux/pid_namespace.h>
55317+#include <linux/fdtable.h>
55318+#include <linux/percpu.h>
55319+
55320+#include <asm/uaccess.h>
55321+#include <asm/errno.h>
55322+#include <asm/mman.h>
55323+
55324+static struct acl_role_db acl_role_set;
55325+static struct name_db name_set;
55326+static struct inodev_db inodev_set;
55327+
55328+/* for keeping track of userspace pointers used for subjects, so we
55329+ can share references in the kernel as well
55330+*/
55331+
55332+static struct dentry *real_root;
55333+static struct vfsmount *real_root_mnt;
55334+
55335+static struct acl_subj_map_db subj_map_set;
55336+
55337+static struct acl_role_label *default_role;
55338+
55339+static struct acl_role_label *role_list;
55340+
55341+static u16 acl_sp_role_value;
55342+
55343+extern char *gr_shared_page[4];
55344+static DEFINE_MUTEX(gr_dev_mutex);
55345+DEFINE_RWLOCK(gr_inode_lock);
55346+
55347+struct gr_arg *gr_usermode;
55348+
55349+static unsigned int gr_status __read_only = GR_STATUS_INIT;
55350+
55351+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
55352+extern void gr_clear_learn_entries(void);
55353+
55354+#ifdef CONFIG_GRKERNSEC_RESLOG
55355+extern void gr_log_resource(const struct task_struct *task,
55356+ const int res, const unsigned long wanted, const int gt);
55357+#endif
55358+
55359+unsigned char *gr_system_salt;
55360+unsigned char *gr_system_sum;
55361+
55362+static struct sprole_pw **acl_special_roles = NULL;
55363+static __u16 num_sprole_pws = 0;
55364+
55365+static struct acl_role_label *kernel_role = NULL;
55366+
55367+static unsigned int gr_auth_attempts = 0;
55368+static unsigned long gr_auth_expires = 0UL;
55369+
55370+#ifdef CONFIG_NET
55371+extern struct vfsmount *sock_mnt;
55372+#endif
55373+extern struct vfsmount *pipe_mnt;
55374+extern struct vfsmount *shm_mnt;
55375+#ifdef CONFIG_HUGETLBFS
55376+extern struct vfsmount *hugetlbfs_vfsmount;
55377+#endif
55378+
55379+static struct acl_object_label *fakefs_obj_rw;
55380+static struct acl_object_label *fakefs_obj_rwx;
55381+
55382+extern int gr_init_uidset(void);
55383+extern void gr_free_uidset(void);
55384+extern void gr_remove_uid(uid_t uid);
55385+extern int gr_find_uid(uid_t uid);
55386+
55387+__inline__ int
55388+gr_acl_is_enabled(void)
55389+{
55390+ return (gr_status & GR_READY);
55391+}
55392+
55393+#ifdef CONFIG_BTRFS_FS
55394+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55395+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55396+#endif
55397+
55398+static inline dev_t __get_dev(const struct dentry *dentry)
55399+{
55400+#ifdef CONFIG_BTRFS_FS
55401+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55402+ return get_btrfs_dev_from_inode(dentry->d_inode);
55403+ else
55404+#endif
55405+ return dentry->d_inode->i_sb->s_dev;
55406+}
55407+
55408+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55409+{
55410+ return __get_dev(dentry);
55411+}
55412+
55413+static char gr_task_roletype_to_char(struct task_struct *task)
55414+{
55415+ switch (task->role->roletype &
55416+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
55417+ GR_ROLE_SPECIAL)) {
55418+ case GR_ROLE_DEFAULT:
55419+ return 'D';
55420+ case GR_ROLE_USER:
55421+ return 'U';
55422+ case GR_ROLE_GROUP:
55423+ return 'G';
55424+ case GR_ROLE_SPECIAL:
55425+ return 'S';
55426+ }
55427+
55428+ return 'X';
55429+}
55430+
55431+char gr_roletype_to_char(void)
55432+{
55433+ return gr_task_roletype_to_char(current);
55434+}
55435+
55436+__inline__ int
55437+gr_acl_tpe_check(void)
55438+{
55439+ if (unlikely(!(gr_status & GR_READY)))
55440+ return 0;
55441+ if (current->role->roletype & GR_ROLE_TPE)
55442+ return 1;
55443+ else
55444+ return 0;
55445+}
55446+
55447+int
55448+gr_handle_rawio(const struct inode *inode)
55449+{
55450+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55451+ if (inode && S_ISBLK(inode->i_mode) &&
55452+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55453+ !capable(CAP_SYS_RAWIO))
55454+ return 1;
55455+#endif
55456+ return 0;
55457+}
55458+
55459+static int
55460+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
55461+{
55462+ if (likely(lena != lenb))
55463+ return 0;
55464+
55465+ return !memcmp(a, b, lena);
55466+}
55467+
55468+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
55469+{
55470+ *buflen -= namelen;
55471+ if (*buflen < 0)
55472+ return -ENAMETOOLONG;
55473+ *buffer -= namelen;
55474+ memcpy(*buffer, str, namelen);
55475+ return 0;
55476+}
55477+
55478+/* this must be called with vfsmount_lock and dcache_lock held */
55479+
55480+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
55481+ struct dentry *root, struct vfsmount *rootmnt,
55482+ char *buffer, int buflen)
55483+{
55484+ char * end = buffer+buflen;
55485+ char * retval;
55486+ int namelen;
55487+
55488+ *--end = '\0';
55489+ buflen--;
55490+
55491+ if (buflen < 1)
55492+ goto Elong;
55493+ /* Get '/' right */
55494+ retval = end-1;
55495+ *retval = '/';
55496+
55497+ for (;;) {
55498+ struct dentry * parent;
55499+
55500+ if (dentry == root && vfsmnt == rootmnt)
55501+ break;
55502+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
55503+ /* Global root? */
55504+ if (vfsmnt->mnt_parent == vfsmnt)
55505+ goto global_root;
55506+ dentry = vfsmnt->mnt_mountpoint;
55507+ vfsmnt = vfsmnt->mnt_parent;
55508+ continue;
55509+ }
55510+ parent = dentry->d_parent;
55511+ prefetch(parent);
55512+ namelen = dentry->d_name.len;
55513+ buflen -= namelen + 1;
55514+ if (buflen < 0)
55515+ goto Elong;
55516+ end -= namelen;
55517+ memcpy(end, dentry->d_name.name, namelen);
55518+ *--end = '/';
55519+ retval = end;
55520+ dentry = parent;
55521+ }
55522+
55523+out:
55524+ return retval;
55525+
55526+global_root:
55527+ namelen = dentry->d_name.len;
55528+ buflen -= namelen;
55529+ if (buflen < 0)
55530+ goto Elong;
55531+ retval -= namelen-1; /* hit the slash */
55532+ memcpy(retval, dentry->d_name.name, namelen);
55533+ goto out;
55534+Elong:
55535+ retval = ERR_PTR(-ENAMETOOLONG);
55536+ goto out;
55537+}
55538+
55539+static char *
55540+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
55541+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
55542+{
55543+ char *retval;
55544+
55545+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
55546+ if (unlikely(IS_ERR(retval)))
55547+ retval = strcpy(buf, "<path too long>");
55548+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
55549+ retval[1] = '\0';
55550+
55551+ return retval;
55552+}
55553+
55554+static char *
55555+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
55556+ char *buf, int buflen)
55557+{
55558+ char *res;
55559+
55560+ /* we can use real_root, real_root_mnt, because this is only called
55561+ by the RBAC system */
55562+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
55563+
55564+ return res;
55565+}
55566+
55567+static char *
55568+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
55569+ char *buf, int buflen)
55570+{
55571+ char *res;
55572+ struct dentry *root;
55573+ struct vfsmount *rootmnt;
55574+ struct task_struct *reaper = &init_task;
55575+
55576+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
55577+ read_lock(&reaper->fs->lock);
55578+ root = dget(reaper->fs->root.dentry);
55579+ rootmnt = mntget(reaper->fs->root.mnt);
55580+ read_unlock(&reaper->fs->lock);
55581+
55582+ spin_lock(&dcache_lock);
55583+ spin_lock(&vfsmount_lock);
55584+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
55585+ spin_unlock(&vfsmount_lock);
55586+ spin_unlock(&dcache_lock);
55587+
55588+ dput(root);
55589+ mntput(rootmnt);
55590+ return res;
55591+}
55592+
55593+static char *
55594+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
55595+{
55596+ char *ret;
55597+ spin_lock(&dcache_lock);
55598+ spin_lock(&vfsmount_lock);
55599+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
55600+ PAGE_SIZE);
55601+ spin_unlock(&vfsmount_lock);
55602+ spin_unlock(&dcache_lock);
55603+ return ret;
55604+}
55605+
55606+static char *
55607+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
55608+{
55609+ char *ret;
55610+ char *buf;
55611+ int buflen;
55612+
55613+ spin_lock(&dcache_lock);
55614+ spin_lock(&vfsmount_lock);
55615+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
55616+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
55617+ buflen = (int)(ret - buf);
55618+ if (buflen >= 5)
55619+ prepend(&ret, &buflen, "/proc", 5);
55620+ else
55621+ ret = strcpy(buf, "<path too long>");
55622+ spin_unlock(&vfsmount_lock);
55623+ spin_unlock(&dcache_lock);
55624+ return ret;
55625+}
55626+
55627+char *
55628+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
55629+{
55630+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
55631+ PAGE_SIZE);
55632+}
55633+
55634+char *
55635+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
55636+{
55637+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55638+ PAGE_SIZE);
55639+}
55640+
55641+char *
55642+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
55643+{
55644+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
55645+ PAGE_SIZE);
55646+}
55647+
55648+char *
55649+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
55650+{
55651+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
55652+ PAGE_SIZE);
55653+}
55654+
55655+char *
55656+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
55657+{
55658+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
55659+ PAGE_SIZE);
55660+}
55661+
55662+__inline__ __u32
55663+to_gr_audit(const __u32 reqmode)
55664+{
55665+ /* masks off auditable permission flags, then shifts them to create
55666+ auditing flags, and adds the special case of append auditing if
55667+ we're requesting write */
55668+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
55669+}
55670+
55671+struct acl_subject_label *
55672+lookup_subject_map(const struct acl_subject_label *userp)
55673+{
55674+ unsigned int index = shash(userp, subj_map_set.s_size);
55675+ struct subject_map *match;
55676+
55677+ match = subj_map_set.s_hash[index];
55678+
55679+ while (match && match->user != userp)
55680+ match = match->next;
55681+
55682+ if (match != NULL)
55683+ return match->kernel;
55684+ else
55685+ return NULL;
55686+}
55687+
55688+static void
55689+insert_subj_map_entry(struct subject_map *subjmap)
55690+{
55691+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
55692+ struct subject_map **curr;
55693+
55694+ subjmap->prev = NULL;
55695+
55696+ curr = &subj_map_set.s_hash[index];
55697+ if (*curr != NULL)
55698+ (*curr)->prev = subjmap;
55699+
55700+ subjmap->next = *curr;
55701+ *curr = subjmap;
55702+
55703+ return;
55704+}
55705+
55706+static struct acl_role_label *
55707+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
55708+ const gid_t gid)
55709+{
55710+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
55711+ struct acl_role_label *match;
55712+ struct role_allowed_ip *ipp;
55713+ unsigned int x;
55714+ u32 curr_ip = task->signal->curr_ip;
55715+
55716+ task->signal->saved_ip = curr_ip;
55717+
55718+ match = acl_role_set.r_hash[index];
55719+
55720+ while (match) {
55721+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
55722+ for (x = 0; x < match->domain_child_num; x++) {
55723+ if (match->domain_children[x] == uid)
55724+ goto found;
55725+ }
55726+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
55727+ break;
55728+ match = match->next;
55729+ }
55730+found:
55731+ if (match == NULL) {
55732+ try_group:
55733+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
55734+ match = acl_role_set.r_hash[index];
55735+
55736+ while (match) {
55737+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
55738+ for (x = 0; x < match->domain_child_num; x++) {
55739+ if (match->domain_children[x] == gid)
55740+ goto found2;
55741+ }
55742+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
55743+ break;
55744+ match = match->next;
55745+ }
55746+found2:
55747+ if (match == NULL)
55748+ match = default_role;
55749+ if (match->allowed_ips == NULL)
55750+ return match;
55751+ else {
55752+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
55753+ if (likely
55754+ ((ntohl(curr_ip) & ipp->netmask) ==
55755+ (ntohl(ipp->addr) & ipp->netmask)))
55756+ return match;
55757+ }
55758+ match = default_role;
55759+ }
55760+ } else if (match->allowed_ips == NULL) {
55761+ return match;
55762+ } else {
55763+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
55764+ if (likely
55765+ ((ntohl(curr_ip) & ipp->netmask) ==
55766+ (ntohl(ipp->addr) & ipp->netmask)))
55767+ return match;
55768+ }
55769+ goto try_group;
55770+ }
55771+
55772+ return match;
55773+}
55774+
55775+struct acl_subject_label *
55776+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
55777+ const struct acl_role_label *role)
55778+{
55779+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
55780+ struct acl_subject_label *match;
55781+
55782+ match = role->subj_hash[index];
55783+
55784+ while (match && (match->inode != ino || match->device != dev ||
55785+ (match->mode & GR_DELETED))) {
55786+ match = match->next;
55787+ }
55788+
55789+ if (match && !(match->mode & GR_DELETED))
55790+ return match;
55791+ else
55792+ return NULL;
55793+}
55794+
55795+struct acl_subject_label *
55796+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
55797+ const struct acl_role_label *role)
55798+{
55799+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
55800+ struct acl_subject_label *match;
55801+
55802+ match = role->subj_hash[index];
55803+
55804+ while (match && (match->inode != ino || match->device != dev ||
55805+ !(match->mode & GR_DELETED))) {
55806+ match = match->next;
55807+ }
55808+
55809+ if (match && (match->mode & GR_DELETED))
55810+ return match;
55811+ else
55812+ return NULL;
55813+}
55814+
55815+static struct acl_object_label *
55816+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
55817+ const struct acl_subject_label *subj)
55818+{
55819+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
55820+ struct acl_object_label *match;
55821+
55822+ match = subj->obj_hash[index];
55823+
55824+ while (match && (match->inode != ino || match->device != dev ||
55825+ (match->mode & GR_DELETED))) {
55826+ match = match->next;
55827+ }
55828+
55829+ if (match && !(match->mode & GR_DELETED))
55830+ return match;
55831+ else
55832+ return NULL;
55833+}
55834+
55835+static struct acl_object_label *
55836+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
55837+ const struct acl_subject_label *subj)
55838+{
55839+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
55840+ struct acl_object_label *match;
55841+
55842+ match = subj->obj_hash[index];
55843+
55844+ while (match && (match->inode != ino || match->device != dev ||
55845+ !(match->mode & GR_DELETED))) {
55846+ match = match->next;
55847+ }
55848+
55849+ if (match && (match->mode & GR_DELETED))
55850+ return match;
55851+
55852+ match = subj->obj_hash[index];
55853+
55854+ while (match && (match->inode != ino || match->device != dev ||
55855+ (match->mode & GR_DELETED))) {
55856+ match = match->next;
55857+ }
55858+
55859+ if (match && !(match->mode & GR_DELETED))
55860+ return match;
55861+ else
55862+ return NULL;
55863+}
55864+
55865+static struct name_entry *
55866+lookup_name_entry(const char *name)
55867+{
55868+ unsigned int len = strlen(name);
55869+ unsigned int key = full_name_hash(name, len);
55870+ unsigned int index = key % name_set.n_size;
55871+ struct name_entry *match;
55872+
55873+ match = name_set.n_hash[index];
55874+
55875+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
55876+ match = match->next;
55877+
55878+ return match;
55879+}
55880+
55881+static struct name_entry *
55882+lookup_name_entry_create(const char *name)
55883+{
55884+ unsigned int len = strlen(name);
55885+ unsigned int key = full_name_hash(name, len);
55886+ unsigned int index = key % name_set.n_size;
55887+ struct name_entry *match;
55888+
55889+ match = name_set.n_hash[index];
55890+
55891+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
55892+ !match->deleted))
55893+ match = match->next;
55894+
55895+ if (match && match->deleted)
55896+ return match;
55897+
55898+ match = name_set.n_hash[index];
55899+
55900+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
55901+ match->deleted))
55902+ match = match->next;
55903+
55904+ if (match && !match->deleted)
55905+ return match;
55906+ else
55907+ return NULL;
55908+}
55909+
55910+static struct inodev_entry *
55911+lookup_inodev_entry(const ino_t ino, const dev_t dev)
55912+{
55913+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
55914+ struct inodev_entry *match;
55915+
55916+ match = inodev_set.i_hash[index];
55917+
55918+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
55919+ match = match->next;
55920+
55921+ return match;
55922+}
55923+
55924+static void
55925+insert_inodev_entry(struct inodev_entry *entry)
55926+{
55927+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
55928+ inodev_set.i_size);
55929+ struct inodev_entry **curr;
55930+
55931+ entry->prev = NULL;
55932+
55933+ curr = &inodev_set.i_hash[index];
55934+ if (*curr != NULL)
55935+ (*curr)->prev = entry;
55936+
55937+ entry->next = *curr;
55938+ *curr = entry;
55939+
55940+ return;
55941+}
55942+
55943+static void
55944+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
55945+{
55946+ unsigned int index =
55947+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
55948+ struct acl_role_label **curr;
55949+ struct acl_role_label *tmp;
55950+
55951+ curr = &acl_role_set.r_hash[index];
55952+
55953+ /* if role was already inserted due to domains and already has
55954+ a role in the same bucket as it attached, then we need to
55955+ combine these two buckets
55956+ */
55957+ if (role->next) {
55958+ tmp = role->next;
55959+ while (tmp->next)
55960+ tmp = tmp->next;
55961+ tmp->next = *curr;
55962+ } else
55963+ role->next = *curr;
55964+ *curr = role;
55965+
55966+ return;
55967+}
55968+
55969+static void
55970+insert_acl_role_label(struct acl_role_label *role)
55971+{
55972+ int i;
55973+
55974+ if (role_list == NULL) {
55975+ role_list = role;
55976+ role->prev = NULL;
55977+ } else {
55978+ role->prev = role_list;
55979+ role_list = role;
55980+ }
55981+
55982+ /* used for hash chains */
55983+ role->next = NULL;
55984+
55985+ if (role->roletype & GR_ROLE_DOMAIN) {
55986+ for (i = 0; i < role->domain_child_num; i++)
55987+ __insert_acl_role_label(role, role->domain_children[i]);
55988+ } else
55989+ __insert_acl_role_label(role, role->uidgid);
55990+}
55991+
55992+static int
55993+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
55994+{
55995+ struct name_entry **curr, *nentry;
55996+ struct inodev_entry *ientry;
55997+ unsigned int len = strlen(name);
55998+ unsigned int key = full_name_hash(name, len);
55999+ unsigned int index = key % name_set.n_size;
56000+
56001+ curr = &name_set.n_hash[index];
56002+
56003+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56004+ curr = &((*curr)->next);
56005+
56006+ if (*curr != NULL)
56007+ return 1;
56008+
56009+ nentry = acl_alloc(sizeof (struct name_entry));
56010+ if (nentry == NULL)
56011+ return 0;
56012+ ientry = acl_alloc(sizeof (struct inodev_entry));
56013+ if (ientry == NULL)
56014+ return 0;
56015+ ientry->nentry = nentry;
56016+
56017+ nentry->key = key;
56018+ nentry->name = name;
56019+ nentry->inode = inode;
56020+ nentry->device = device;
56021+ nentry->len = len;
56022+ nentry->deleted = deleted;
56023+
56024+ nentry->prev = NULL;
56025+ curr = &name_set.n_hash[index];
56026+ if (*curr != NULL)
56027+ (*curr)->prev = nentry;
56028+ nentry->next = *curr;
56029+ *curr = nentry;
56030+
56031+ /* insert us into the table searchable by inode/dev */
56032+ insert_inodev_entry(ientry);
56033+
56034+ return 1;
56035+}
56036+
56037+static void
56038+insert_acl_obj_label(struct acl_object_label *obj,
56039+ struct acl_subject_label *subj)
56040+{
56041+ unsigned int index =
56042+ fhash(obj->inode, obj->device, subj->obj_hash_size);
56043+ struct acl_object_label **curr;
56044+
56045+
56046+ obj->prev = NULL;
56047+
56048+ curr = &subj->obj_hash[index];
56049+ if (*curr != NULL)
56050+ (*curr)->prev = obj;
56051+
56052+ obj->next = *curr;
56053+ *curr = obj;
56054+
56055+ return;
56056+}
56057+
56058+static void
56059+insert_acl_subj_label(struct acl_subject_label *obj,
56060+ struct acl_role_label *role)
56061+{
56062+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56063+ struct acl_subject_label **curr;
56064+
56065+ obj->prev = NULL;
56066+
56067+ curr = &role->subj_hash[index];
56068+ if (*curr != NULL)
56069+ (*curr)->prev = obj;
56070+
56071+ obj->next = *curr;
56072+ *curr = obj;
56073+
56074+ return;
56075+}
56076+
56077+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56078+
56079+static void *
56080+create_table(__u32 * len, int elementsize)
56081+{
56082+ unsigned int table_sizes[] = {
56083+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56084+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56085+ 4194301, 8388593, 16777213, 33554393, 67108859
56086+ };
56087+ void *newtable = NULL;
56088+ unsigned int pwr = 0;
56089+
56090+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56091+ table_sizes[pwr] <= *len)
56092+ pwr++;
56093+
56094+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56095+ return newtable;
56096+
56097+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56098+ newtable =
56099+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56100+ else
56101+ newtable = vmalloc(table_sizes[pwr] * elementsize);
56102+
56103+ *len = table_sizes[pwr];
56104+
56105+ return newtable;
56106+}
56107+
56108+static int
56109+init_variables(const struct gr_arg *arg)
56110+{
56111+ struct task_struct *reaper = &init_task;
56112+ unsigned int stacksize;
56113+
56114+ subj_map_set.s_size = arg->role_db.num_subjects;
56115+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56116+ name_set.n_size = arg->role_db.num_objects;
56117+ inodev_set.i_size = arg->role_db.num_objects;
56118+
56119+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
56120+ !name_set.n_size || !inodev_set.i_size)
56121+ return 1;
56122+
56123+ if (!gr_init_uidset())
56124+ return 1;
56125+
56126+ /* set up the stack that holds allocation info */
56127+
56128+ stacksize = arg->role_db.num_pointers + 5;
56129+
56130+ if (!acl_alloc_stack_init(stacksize))
56131+ return 1;
56132+
56133+ /* grab reference for the real root dentry and vfsmount */
56134+ read_lock(&reaper->fs->lock);
56135+ real_root = dget(reaper->fs->root.dentry);
56136+ real_root_mnt = mntget(reaper->fs->root.mnt);
56137+ read_unlock(&reaper->fs->lock);
56138+
56139+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56140+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56141+#endif
56142+
56143+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56144+ if (fakefs_obj_rw == NULL)
56145+ return 1;
56146+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56147+
56148+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56149+ if (fakefs_obj_rwx == NULL)
56150+ return 1;
56151+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56152+
56153+ subj_map_set.s_hash =
56154+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
56155+ acl_role_set.r_hash =
56156+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
56157+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
56158+ inodev_set.i_hash =
56159+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
56160+
56161+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
56162+ !name_set.n_hash || !inodev_set.i_hash)
56163+ return 1;
56164+
56165+ memset(subj_map_set.s_hash, 0,
56166+ sizeof(struct subject_map *) * subj_map_set.s_size);
56167+ memset(acl_role_set.r_hash, 0,
56168+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
56169+ memset(name_set.n_hash, 0,
56170+ sizeof (struct name_entry *) * name_set.n_size);
56171+ memset(inodev_set.i_hash, 0,
56172+ sizeof (struct inodev_entry *) * inodev_set.i_size);
56173+
56174+ return 0;
56175+}
56176+
56177+/* free information not needed after startup
56178+ currently contains user->kernel pointer mappings for subjects
56179+*/
56180+
56181+static void
56182+free_init_variables(void)
56183+{
56184+ __u32 i;
56185+
56186+ if (subj_map_set.s_hash) {
56187+ for (i = 0; i < subj_map_set.s_size; i++) {
56188+ if (subj_map_set.s_hash[i]) {
56189+ kfree(subj_map_set.s_hash[i]);
56190+ subj_map_set.s_hash[i] = NULL;
56191+ }
56192+ }
56193+
56194+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
56195+ PAGE_SIZE)
56196+ kfree(subj_map_set.s_hash);
56197+ else
56198+ vfree(subj_map_set.s_hash);
56199+ }
56200+
56201+ return;
56202+}
56203+
56204+static void
56205+free_variables(void)
56206+{
56207+ struct acl_subject_label *s;
56208+ struct acl_role_label *r;
56209+ struct task_struct *task, *task2;
56210+ unsigned int x;
56211+
56212+ gr_clear_learn_entries();
56213+
56214+ read_lock(&tasklist_lock);
56215+ do_each_thread(task2, task) {
56216+ task->acl_sp_role = 0;
56217+ task->acl_role_id = 0;
56218+ task->acl = NULL;
56219+ task->role = NULL;
56220+ } while_each_thread(task2, task);
56221+ read_unlock(&tasklist_lock);
56222+
56223+ /* release the reference to the real root dentry and vfsmount */
56224+ if (real_root)
56225+ dput(real_root);
56226+ real_root = NULL;
56227+ if (real_root_mnt)
56228+ mntput(real_root_mnt);
56229+ real_root_mnt = NULL;
56230+
56231+ /* free all object hash tables */
56232+
56233+ FOR_EACH_ROLE_START(r)
56234+ if (r->subj_hash == NULL)
56235+ goto next_role;
56236+ FOR_EACH_SUBJECT_START(r, s, x)
56237+ if (s->obj_hash == NULL)
56238+ break;
56239+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56240+ kfree(s->obj_hash);
56241+ else
56242+ vfree(s->obj_hash);
56243+ FOR_EACH_SUBJECT_END(s, x)
56244+ FOR_EACH_NESTED_SUBJECT_START(r, s)
56245+ if (s->obj_hash == NULL)
56246+ break;
56247+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56248+ kfree(s->obj_hash);
56249+ else
56250+ vfree(s->obj_hash);
56251+ FOR_EACH_NESTED_SUBJECT_END(s)
56252+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
56253+ kfree(r->subj_hash);
56254+ else
56255+ vfree(r->subj_hash);
56256+ r->subj_hash = NULL;
56257+next_role:
56258+ FOR_EACH_ROLE_END(r)
56259+
56260+ acl_free_all();
56261+
56262+ if (acl_role_set.r_hash) {
56263+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
56264+ PAGE_SIZE)
56265+ kfree(acl_role_set.r_hash);
56266+ else
56267+ vfree(acl_role_set.r_hash);
56268+ }
56269+ if (name_set.n_hash) {
56270+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
56271+ PAGE_SIZE)
56272+ kfree(name_set.n_hash);
56273+ else
56274+ vfree(name_set.n_hash);
56275+ }
56276+
56277+ if (inodev_set.i_hash) {
56278+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
56279+ PAGE_SIZE)
56280+ kfree(inodev_set.i_hash);
56281+ else
56282+ vfree(inodev_set.i_hash);
56283+ }
56284+
56285+ gr_free_uidset();
56286+
56287+ memset(&name_set, 0, sizeof (struct name_db));
56288+ memset(&inodev_set, 0, sizeof (struct inodev_db));
56289+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
56290+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
56291+
56292+ default_role = NULL;
56293+ role_list = NULL;
56294+
56295+ return;
56296+}
56297+
56298+static __u32
56299+count_user_objs(struct acl_object_label *userp)
56300+{
56301+ struct acl_object_label o_tmp;
56302+ __u32 num = 0;
56303+
56304+ while (userp) {
56305+ if (copy_from_user(&o_tmp, userp,
56306+ sizeof (struct acl_object_label)))
56307+ break;
56308+
56309+ userp = o_tmp.prev;
56310+ num++;
56311+ }
56312+
56313+ return num;
56314+}
56315+
56316+static struct acl_subject_label *
56317+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
56318+
56319+static int
56320+copy_user_glob(struct acl_object_label *obj)
56321+{
56322+ struct acl_object_label *g_tmp, **guser;
56323+ unsigned int len;
56324+ char *tmp;
56325+
56326+ if (obj->globbed == NULL)
56327+ return 0;
56328+
56329+ guser = &obj->globbed;
56330+ while (*guser) {
56331+ g_tmp = (struct acl_object_label *)
56332+ acl_alloc(sizeof (struct acl_object_label));
56333+ if (g_tmp == NULL)
56334+ return -ENOMEM;
56335+
56336+ if (copy_from_user(g_tmp, *guser,
56337+ sizeof (struct acl_object_label)))
56338+ return -EFAULT;
56339+
56340+ len = strnlen_user(g_tmp->filename, PATH_MAX);
56341+
56342+ if (!len || len >= PATH_MAX)
56343+ return -EINVAL;
56344+
56345+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56346+ return -ENOMEM;
56347+
56348+ if (copy_from_user(tmp, g_tmp->filename, len))
56349+ return -EFAULT;
56350+ tmp[len-1] = '\0';
56351+ g_tmp->filename = tmp;
56352+
56353+ *guser = g_tmp;
56354+ guser = &(g_tmp->next);
56355+ }
56356+
56357+ return 0;
56358+}
56359+
56360+static int
56361+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
56362+ struct acl_role_label *role)
56363+{
56364+ struct acl_object_label *o_tmp;
56365+ unsigned int len;
56366+ int ret;
56367+ char *tmp;
56368+
56369+ while (userp) {
56370+ if ((o_tmp = (struct acl_object_label *)
56371+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
56372+ return -ENOMEM;
56373+
56374+ if (copy_from_user(o_tmp, userp,
56375+ sizeof (struct acl_object_label)))
56376+ return -EFAULT;
56377+
56378+ userp = o_tmp->prev;
56379+
56380+ len = strnlen_user(o_tmp->filename, PATH_MAX);
56381+
56382+ if (!len || len >= PATH_MAX)
56383+ return -EINVAL;
56384+
56385+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56386+ return -ENOMEM;
56387+
56388+ if (copy_from_user(tmp, o_tmp->filename, len))
56389+ return -EFAULT;
56390+ tmp[len-1] = '\0';
56391+ o_tmp->filename = tmp;
56392+
56393+ insert_acl_obj_label(o_tmp, subj);
56394+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
56395+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
56396+ return -ENOMEM;
56397+
56398+ ret = copy_user_glob(o_tmp);
56399+ if (ret)
56400+ return ret;
56401+
56402+ if (o_tmp->nested) {
56403+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
56404+ if (IS_ERR(o_tmp->nested))
56405+ return PTR_ERR(o_tmp->nested);
56406+
56407+ /* insert into nested subject list */
56408+ o_tmp->nested->next = role->hash->first;
56409+ role->hash->first = o_tmp->nested;
56410+ }
56411+ }
56412+
56413+ return 0;
56414+}
56415+
56416+static __u32
56417+count_user_subjs(struct acl_subject_label *userp)
56418+{
56419+ struct acl_subject_label s_tmp;
56420+ __u32 num = 0;
56421+
56422+ while (userp) {
56423+ if (copy_from_user(&s_tmp, userp,
56424+ sizeof (struct acl_subject_label)))
56425+ break;
56426+
56427+ userp = s_tmp.prev;
56428+ /* do not count nested subjects against this count, since
56429+ they are not included in the hash table, but are
56430+ attached to objects. We have already counted
56431+ the subjects in userspace for the allocation
56432+ stack
56433+ */
56434+ if (!(s_tmp.mode & GR_NESTED))
56435+ num++;
56436+ }
56437+
56438+ return num;
56439+}
56440+
56441+static int
56442+copy_user_allowedips(struct acl_role_label *rolep)
56443+{
56444+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
56445+
56446+ ruserip = rolep->allowed_ips;
56447+
56448+ while (ruserip) {
56449+ rlast = rtmp;
56450+
56451+ if ((rtmp = (struct role_allowed_ip *)
56452+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
56453+ return -ENOMEM;
56454+
56455+ if (copy_from_user(rtmp, ruserip,
56456+ sizeof (struct role_allowed_ip)))
56457+ return -EFAULT;
56458+
56459+ ruserip = rtmp->prev;
56460+
56461+ if (!rlast) {
56462+ rtmp->prev = NULL;
56463+ rolep->allowed_ips = rtmp;
56464+ } else {
56465+ rlast->next = rtmp;
56466+ rtmp->prev = rlast;
56467+ }
56468+
56469+ if (!ruserip)
56470+ rtmp->next = NULL;
56471+ }
56472+
56473+ return 0;
56474+}
56475+
56476+static int
56477+copy_user_transitions(struct acl_role_label *rolep)
56478+{
56479+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
56480+
56481+ unsigned int len;
56482+ char *tmp;
56483+
56484+ rusertp = rolep->transitions;
56485+
56486+ while (rusertp) {
56487+ rlast = rtmp;
56488+
56489+ if ((rtmp = (struct role_transition *)
56490+ acl_alloc(sizeof (struct role_transition))) == NULL)
56491+ return -ENOMEM;
56492+
56493+ if (copy_from_user(rtmp, rusertp,
56494+ sizeof (struct role_transition)))
56495+ return -EFAULT;
56496+
56497+ rusertp = rtmp->prev;
56498+
56499+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
56500+
56501+ if (!len || len >= GR_SPROLE_LEN)
56502+ return -EINVAL;
56503+
56504+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56505+ return -ENOMEM;
56506+
56507+ if (copy_from_user(tmp, rtmp->rolename, len))
56508+ return -EFAULT;
56509+ tmp[len-1] = '\0';
56510+ rtmp->rolename = tmp;
56511+
56512+ if (!rlast) {
56513+ rtmp->prev = NULL;
56514+ rolep->transitions = rtmp;
56515+ } else {
56516+ rlast->next = rtmp;
56517+ rtmp->prev = rlast;
56518+ }
56519+
56520+ if (!rusertp)
56521+ rtmp->next = NULL;
56522+ }
56523+
56524+ return 0;
56525+}
56526+
56527+static struct acl_subject_label *
56528+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
56529+{
56530+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
56531+ unsigned int len;
56532+ char *tmp;
56533+ __u32 num_objs;
56534+ struct acl_ip_label **i_tmp, *i_utmp2;
56535+ struct gr_hash_struct ghash;
56536+ struct subject_map *subjmap;
56537+ unsigned int i_num;
56538+ int err;
56539+
56540+ s_tmp = lookup_subject_map(userp);
56541+
56542+ /* we've already copied this subject into the kernel, just return
56543+ the reference to it, and don't copy it over again
56544+ */
56545+ if (s_tmp)
56546+ return(s_tmp);
56547+
56548+ if ((s_tmp = (struct acl_subject_label *)
56549+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
56550+ return ERR_PTR(-ENOMEM);
56551+
56552+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
56553+ if (subjmap == NULL)
56554+ return ERR_PTR(-ENOMEM);
56555+
56556+ subjmap->user = userp;
56557+ subjmap->kernel = s_tmp;
56558+ insert_subj_map_entry(subjmap);
56559+
56560+ if (copy_from_user(s_tmp, userp,
56561+ sizeof (struct acl_subject_label)))
56562+ return ERR_PTR(-EFAULT);
56563+
56564+ len = strnlen_user(s_tmp->filename, PATH_MAX);
56565+
56566+ if (!len || len >= PATH_MAX)
56567+ return ERR_PTR(-EINVAL);
56568+
56569+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56570+ return ERR_PTR(-ENOMEM);
56571+
56572+ if (copy_from_user(tmp, s_tmp->filename, len))
56573+ return ERR_PTR(-EFAULT);
56574+ tmp[len-1] = '\0';
56575+ s_tmp->filename = tmp;
56576+
56577+ if (!strcmp(s_tmp->filename, "/"))
56578+ role->root_label = s_tmp;
56579+
56580+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
56581+ return ERR_PTR(-EFAULT);
56582+
56583+ /* copy user and group transition tables */
56584+
56585+ if (s_tmp->user_trans_num) {
56586+ uid_t *uidlist;
56587+
56588+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
56589+ if (uidlist == NULL)
56590+ return ERR_PTR(-ENOMEM);
56591+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
56592+ return ERR_PTR(-EFAULT);
56593+
56594+ s_tmp->user_transitions = uidlist;
56595+ }
56596+
56597+ if (s_tmp->group_trans_num) {
56598+ gid_t *gidlist;
56599+
56600+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
56601+ if (gidlist == NULL)
56602+ return ERR_PTR(-ENOMEM);
56603+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
56604+ return ERR_PTR(-EFAULT);
56605+
56606+ s_tmp->group_transitions = gidlist;
56607+ }
56608+
56609+ /* set up object hash table */
56610+ num_objs = count_user_objs(ghash.first);
56611+
56612+ s_tmp->obj_hash_size = num_objs;
56613+ s_tmp->obj_hash =
56614+ (struct acl_object_label **)
56615+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
56616+
56617+ if (!s_tmp->obj_hash)
56618+ return ERR_PTR(-ENOMEM);
56619+
56620+ memset(s_tmp->obj_hash, 0,
56621+ s_tmp->obj_hash_size *
56622+ sizeof (struct acl_object_label *));
56623+
56624+ /* add in objects */
56625+ err = copy_user_objs(ghash.first, s_tmp, role);
56626+
56627+ if (err)
56628+ return ERR_PTR(err);
56629+
56630+ /* set pointer for parent subject */
56631+ if (s_tmp->parent_subject) {
56632+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
56633+
56634+ if (IS_ERR(s_tmp2))
56635+ return s_tmp2;
56636+
56637+ s_tmp->parent_subject = s_tmp2;
56638+ }
56639+
56640+ /* add in ip acls */
56641+
56642+ if (!s_tmp->ip_num) {
56643+ s_tmp->ips = NULL;
56644+ goto insert;
56645+ }
56646+
56647+ i_tmp =
56648+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
56649+ sizeof (struct acl_ip_label *));
56650+
56651+ if (!i_tmp)
56652+ return ERR_PTR(-ENOMEM);
56653+
56654+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
56655+ *(i_tmp + i_num) =
56656+ (struct acl_ip_label *)
56657+ acl_alloc(sizeof (struct acl_ip_label));
56658+ if (!*(i_tmp + i_num))
56659+ return ERR_PTR(-ENOMEM);
56660+
56661+ if (copy_from_user
56662+ (&i_utmp2, s_tmp->ips + i_num,
56663+ sizeof (struct acl_ip_label *)))
56664+ return ERR_PTR(-EFAULT);
56665+
56666+ if (copy_from_user
56667+ (*(i_tmp + i_num), i_utmp2,
56668+ sizeof (struct acl_ip_label)))
56669+ return ERR_PTR(-EFAULT);
56670+
56671+ if ((*(i_tmp + i_num))->iface == NULL)
56672+ continue;
56673+
56674+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
56675+ if (!len || len >= IFNAMSIZ)
56676+ return ERR_PTR(-EINVAL);
56677+ tmp = acl_alloc(len);
56678+ if (tmp == NULL)
56679+ return ERR_PTR(-ENOMEM);
56680+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
56681+ return ERR_PTR(-EFAULT);
56682+ (*(i_tmp + i_num))->iface = tmp;
56683+ }
56684+
56685+ s_tmp->ips = i_tmp;
56686+
56687+insert:
56688+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
56689+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
56690+ return ERR_PTR(-ENOMEM);
56691+
56692+ return s_tmp;
56693+}
56694+
56695+static int
56696+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
56697+{
56698+ struct acl_subject_label s_pre;
56699+ struct acl_subject_label * ret;
56700+ int err;
56701+
56702+ while (userp) {
56703+ if (copy_from_user(&s_pre, userp,
56704+ sizeof (struct acl_subject_label)))
56705+ return -EFAULT;
56706+
56707+ /* do not add nested subjects here, add
56708+ while parsing objects
56709+ */
56710+
56711+ if (s_pre.mode & GR_NESTED) {
56712+ userp = s_pre.prev;
56713+ continue;
56714+ }
56715+
56716+ ret = do_copy_user_subj(userp, role);
56717+
56718+ err = PTR_ERR(ret);
56719+ if (IS_ERR(ret))
56720+ return err;
56721+
56722+ insert_acl_subj_label(ret, role);
56723+
56724+ userp = s_pre.prev;
56725+ }
56726+
56727+ return 0;
56728+}
56729+
56730+static int
56731+copy_user_acl(struct gr_arg *arg)
56732+{
56733+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
56734+ struct sprole_pw *sptmp;
56735+ struct gr_hash_struct *ghash;
56736+ uid_t *domainlist;
56737+ unsigned int r_num;
56738+ unsigned int len;
56739+ char *tmp;
56740+ int err = 0;
56741+ __u16 i;
56742+ __u32 num_subjs;
56743+
56744+ /* we need a default and kernel role */
56745+ if (arg->role_db.num_roles < 2)
56746+ return -EINVAL;
56747+
56748+ /* copy special role authentication info from userspace */
56749+
56750+ num_sprole_pws = arg->num_sprole_pws;
56751+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
56752+
56753+ if (!acl_special_roles) {
56754+ err = -ENOMEM;
56755+ goto cleanup;
56756+ }
56757+
56758+ for (i = 0; i < num_sprole_pws; i++) {
56759+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
56760+ if (!sptmp) {
56761+ err = -ENOMEM;
56762+ goto cleanup;
56763+ }
56764+ if (copy_from_user(sptmp, arg->sprole_pws + i,
56765+ sizeof (struct sprole_pw))) {
56766+ err = -EFAULT;
56767+ goto cleanup;
56768+ }
56769+
56770+ len =
56771+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
56772+
56773+ if (!len || len >= GR_SPROLE_LEN) {
56774+ err = -EINVAL;
56775+ goto cleanup;
56776+ }
56777+
56778+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
56779+ err = -ENOMEM;
56780+ goto cleanup;
56781+ }
56782+
56783+ if (copy_from_user(tmp, sptmp->rolename, len)) {
56784+ err = -EFAULT;
56785+ goto cleanup;
56786+ }
56787+ tmp[len-1] = '\0';
56788+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56789+ printk(KERN_ALERT "Copying special role %s\n", tmp);
56790+#endif
56791+ sptmp->rolename = tmp;
56792+ acl_special_roles[i] = sptmp;
56793+ }
56794+
56795+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
56796+
56797+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
56798+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
56799+
56800+ if (!r_tmp) {
56801+ err = -ENOMEM;
56802+ goto cleanup;
56803+ }
56804+
56805+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
56806+ sizeof (struct acl_role_label *))) {
56807+ err = -EFAULT;
56808+ goto cleanup;
56809+ }
56810+
56811+ if (copy_from_user(r_tmp, r_utmp2,
56812+ sizeof (struct acl_role_label))) {
56813+ err = -EFAULT;
56814+ goto cleanup;
56815+ }
56816+
56817+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
56818+
56819+ if (!len || len >= PATH_MAX) {
56820+ err = -EINVAL;
56821+ goto cleanup;
56822+ }
56823+
56824+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
56825+ err = -ENOMEM;
56826+ goto cleanup;
56827+ }
56828+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
56829+ err = -EFAULT;
56830+ goto cleanup;
56831+ }
56832+ tmp[len-1] = '\0';
56833+ r_tmp->rolename = tmp;
56834+
56835+ if (!strcmp(r_tmp->rolename, "default")
56836+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
56837+ default_role = r_tmp;
56838+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
56839+ kernel_role = r_tmp;
56840+ }
56841+
56842+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
56843+ err = -ENOMEM;
56844+ goto cleanup;
56845+ }
56846+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
56847+ err = -EFAULT;
56848+ goto cleanup;
56849+ }
56850+
56851+ r_tmp->hash = ghash;
56852+
56853+ num_subjs = count_user_subjs(r_tmp->hash->first);
56854+
56855+ r_tmp->subj_hash_size = num_subjs;
56856+ r_tmp->subj_hash =
56857+ (struct acl_subject_label **)
56858+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
56859+
56860+ if (!r_tmp->subj_hash) {
56861+ err = -ENOMEM;
56862+ goto cleanup;
56863+ }
56864+
56865+ err = copy_user_allowedips(r_tmp);
56866+ if (err)
56867+ goto cleanup;
56868+
56869+ /* copy domain info */
56870+ if (r_tmp->domain_children != NULL) {
56871+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
56872+ if (domainlist == NULL) {
56873+ err = -ENOMEM;
56874+ goto cleanup;
56875+ }
56876+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
56877+ err = -EFAULT;
56878+ goto cleanup;
56879+ }
56880+ r_tmp->domain_children = domainlist;
56881+ }
56882+
56883+ err = copy_user_transitions(r_tmp);
56884+ if (err)
56885+ goto cleanup;
56886+
56887+ memset(r_tmp->subj_hash, 0,
56888+ r_tmp->subj_hash_size *
56889+ sizeof (struct acl_subject_label *));
56890+
56891+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
56892+
56893+ if (err)
56894+ goto cleanup;
56895+
56896+ /* set nested subject list to null */
56897+ r_tmp->hash->first = NULL;
56898+
56899+ insert_acl_role_label(r_tmp);
56900+ }
56901+
56902+ goto return_err;
56903+ cleanup:
56904+ free_variables();
56905+ return_err:
56906+ return err;
56907+
56908+}
56909+
56910+static int
56911+gracl_init(struct gr_arg *args)
56912+{
56913+ int error = 0;
56914+
56915+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
56916+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
56917+
56918+ if (init_variables(args)) {
56919+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
56920+ error = -ENOMEM;
56921+ free_variables();
56922+ goto out;
56923+ }
56924+
56925+ error = copy_user_acl(args);
56926+ free_init_variables();
56927+ if (error) {
56928+ free_variables();
56929+ goto out;
56930+ }
56931+
56932+ if ((error = gr_set_acls(0))) {
56933+ free_variables();
56934+ goto out;
56935+ }
56936+
56937+ pax_open_kernel();
56938+ gr_status |= GR_READY;
56939+ pax_close_kernel();
56940+
56941+ out:
56942+ return error;
56943+}
56944+
56945+/* derived from glibc fnmatch() 0: match, 1: no match*/
56946+
56947+static int
56948+glob_match(const char *p, const char *n)
56949+{
56950+ char c;
56951+
56952+ while ((c = *p++) != '\0') {
56953+ switch (c) {
56954+ case '?':
56955+ if (*n == '\0')
56956+ return 1;
56957+ else if (*n == '/')
56958+ return 1;
56959+ break;
56960+ case '\\':
56961+ if (*n != c)
56962+ return 1;
56963+ break;
56964+ case '*':
56965+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
56966+ if (*n == '/')
56967+ return 1;
56968+ else if (c == '?') {
56969+ if (*n == '\0')
56970+ return 1;
56971+ else
56972+ ++n;
56973+ }
56974+ }
56975+ if (c == '\0') {
56976+ return 0;
56977+ } else {
56978+ const char *endp;
56979+
56980+ if ((endp = strchr(n, '/')) == NULL)
56981+ endp = n + strlen(n);
56982+
56983+ if (c == '[') {
56984+ for (--p; n < endp; ++n)
56985+ if (!glob_match(p, n))
56986+ return 0;
56987+ } else if (c == '/') {
56988+ while (*n != '\0' && *n != '/')
56989+ ++n;
56990+ if (*n == '/' && !glob_match(p, n + 1))
56991+ return 0;
56992+ } else {
56993+ for (--p; n < endp; ++n)
56994+ if (*n == c && !glob_match(p, n))
56995+ return 0;
56996+ }
56997+
56998+ return 1;
56999+ }
57000+ case '[':
57001+ {
57002+ int not;
57003+ char cold;
57004+
57005+ if (*n == '\0' || *n == '/')
57006+ return 1;
57007+
57008+ not = (*p == '!' || *p == '^');
57009+ if (not)
57010+ ++p;
57011+
57012+ c = *p++;
57013+ for (;;) {
57014+ unsigned char fn = (unsigned char)*n;
57015+
57016+ if (c == '\0')
57017+ return 1;
57018+ else {
57019+ if (c == fn)
57020+ goto matched;
57021+ cold = c;
57022+ c = *p++;
57023+
57024+ if (c == '-' && *p != ']') {
57025+ unsigned char cend = *p++;
57026+
57027+ if (cend == '\0')
57028+ return 1;
57029+
57030+ if (cold <= fn && fn <= cend)
57031+ goto matched;
57032+
57033+ c = *p++;
57034+ }
57035+ }
57036+
57037+ if (c == ']')
57038+ break;
57039+ }
57040+ if (!not)
57041+ return 1;
57042+ break;
57043+ matched:
57044+ while (c != ']') {
57045+ if (c == '\0')
57046+ return 1;
57047+
57048+ c = *p++;
57049+ }
57050+ if (not)
57051+ return 1;
57052+ }
57053+ break;
57054+ default:
57055+ if (c != *n)
57056+ return 1;
57057+ }
57058+
57059+ ++n;
57060+ }
57061+
57062+ if (*n == '\0')
57063+ return 0;
57064+
57065+ if (*n == '/')
57066+ return 0;
57067+
57068+ return 1;
57069+}
57070+
57071+static struct acl_object_label *
57072+chk_glob_label(struct acl_object_label *globbed,
57073+ struct dentry *dentry, struct vfsmount *mnt, char **path)
57074+{
57075+ struct acl_object_label *tmp;
57076+
57077+ if (*path == NULL)
57078+ *path = gr_to_filename_nolock(dentry, mnt);
57079+
57080+ tmp = globbed;
57081+
57082+ while (tmp) {
57083+ if (!glob_match(tmp->filename, *path))
57084+ return tmp;
57085+ tmp = tmp->next;
57086+ }
57087+
57088+ return NULL;
57089+}
57090+
57091+static struct acl_object_label *
57092+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57093+ const ino_t curr_ino, const dev_t curr_dev,
57094+ const struct acl_subject_label *subj, char **path, const int checkglob)
57095+{
57096+ struct acl_subject_label *tmpsubj;
57097+ struct acl_object_label *retval;
57098+ struct acl_object_label *retval2;
57099+
57100+ tmpsubj = (struct acl_subject_label *) subj;
57101+ read_lock(&gr_inode_lock);
57102+ do {
57103+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57104+ if (retval) {
57105+ if (checkglob && retval->globbed) {
57106+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57107+ (struct vfsmount *)orig_mnt, path);
57108+ if (retval2)
57109+ retval = retval2;
57110+ }
57111+ break;
57112+ }
57113+ } while ((tmpsubj = tmpsubj->parent_subject));
57114+ read_unlock(&gr_inode_lock);
57115+
57116+ return retval;
57117+}
57118+
57119+static __inline__ struct acl_object_label *
57120+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57121+ const struct dentry *curr_dentry,
57122+ const struct acl_subject_label *subj, char **path, const int checkglob)
57123+{
57124+ int newglob = checkglob;
57125+
57126+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57127+ as we don't want a / * rule to match instead of the / object
57128+ don't do this for create lookups that call this function though, since they're looking up
57129+ on the parent and thus need globbing checks on all paths
57130+ */
57131+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57132+ newglob = GR_NO_GLOB;
57133+
57134+ return __full_lookup(orig_dentry, orig_mnt,
57135+ curr_dentry->d_inode->i_ino,
57136+ __get_dev(curr_dentry), subj, path, newglob);
57137+}
57138+
57139+static struct acl_object_label *
57140+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57141+ const struct acl_subject_label *subj, char *path, const int checkglob)
57142+{
57143+ struct dentry *dentry = (struct dentry *) l_dentry;
57144+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57145+ struct acl_object_label *retval;
57146+
57147+ spin_lock(&dcache_lock);
57148+ spin_lock(&vfsmount_lock);
57149+
57150+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57151+#ifdef CONFIG_NET
57152+ mnt == sock_mnt ||
57153+#endif
57154+#ifdef CONFIG_HUGETLBFS
57155+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
57156+#endif
57157+ /* ignore Eric Biederman */
57158+ IS_PRIVATE(l_dentry->d_inode))) {
57159+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
57160+ goto out;
57161+ }
57162+
57163+ for (;;) {
57164+ if (dentry == real_root && mnt == real_root_mnt)
57165+ break;
57166+
57167+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57168+ if (mnt->mnt_parent == mnt)
57169+ break;
57170+
57171+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57172+ if (retval != NULL)
57173+ goto out;
57174+
57175+ dentry = mnt->mnt_mountpoint;
57176+ mnt = mnt->mnt_parent;
57177+ continue;
57178+ }
57179+
57180+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57181+ if (retval != NULL)
57182+ goto out;
57183+
57184+ dentry = dentry->d_parent;
57185+ }
57186+
57187+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57188+
57189+ if (retval == NULL)
57190+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
57191+out:
57192+ spin_unlock(&vfsmount_lock);
57193+ spin_unlock(&dcache_lock);
57194+
57195+ BUG_ON(retval == NULL);
57196+
57197+ return retval;
57198+}
57199+
57200+static __inline__ struct acl_object_label *
57201+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57202+ const struct acl_subject_label *subj)
57203+{
57204+ char *path = NULL;
57205+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
57206+}
57207+
57208+static __inline__ struct acl_object_label *
57209+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57210+ const struct acl_subject_label *subj)
57211+{
57212+ char *path = NULL;
57213+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
57214+}
57215+
57216+static __inline__ struct acl_object_label *
57217+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57218+ const struct acl_subject_label *subj, char *path)
57219+{
57220+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
57221+}
57222+
57223+static struct acl_subject_label *
57224+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57225+ const struct acl_role_label *role)
57226+{
57227+ struct dentry *dentry = (struct dentry *) l_dentry;
57228+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57229+ struct acl_subject_label *retval;
57230+
57231+ spin_lock(&dcache_lock);
57232+ spin_lock(&vfsmount_lock);
57233+
57234+ for (;;) {
57235+ if (dentry == real_root && mnt == real_root_mnt)
57236+ break;
57237+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57238+ if (mnt->mnt_parent == mnt)
57239+ break;
57240+
57241+ read_lock(&gr_inode_lock);
57242+ retval =
57243+ lookup_acl_subj_label(dentry->d_inode->i_ino,
57244+ __get_dev(dentry), role);
57245+ read_unlock(&gr_inode_lock);
57246+ if (retval != NULL)
57247+ goto out;
57248+
57249+ dentry = mnt->mnt_mountpoint;
57250+ mnt = mnt->mnt_parent;
57251+ continue;
57252+ }
57253+
57254+ read_lock(&gr_inode_lock);
57255+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57256+ __get_dev(dentry), role);
57257+ read_unlock(&gr_inode_lock);
57258+ if (retval != NULL)
57259+ goto out;
57260+
57261+ dentry = dentry->d_parent;
57262+ }
57263+
57264+ read_lock(&gr_inode_lock);
57265+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57266+ __get_dev(dentry), role);
57267+ read_unlock(&gr_inode_lock);
57268+
57269+ if (unlikely(retval == NULL)) {
57270+ read_lock(&gr_inode_lock);
57271+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
57272+ __get_dev(real_root), role);
57273+ read_unlock(&gr_inode_lock);
57274+ }
57275+out:
57276+ spin_unlock(&vfsmount_lock);
57277+ spin_unlock(&dcache_lock);
57278+
57279+ BUG_ON(retval == NULL);
57280+
57281+ return retval;
57282+}
57283+
57284+static void
57285+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
57286+{
57287+ struct task_struct *task = current;
57288+ const struct cred *cred = current_cred();
57289+
57290+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57291+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57292+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57293+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
57294+
57295+ return;
57296+}
57297+
57298+static void
57299+gr_log_learn_sysctl(const char *path, const __u32 mode)
57300+{
57301+ struct task_struct *task = current;
57302+ const struct cred *cred = current_cred();
57303+
57304+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57305+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57306+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57307+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
57308+
57309+ return;
57310+}
57311+
57312+static void
57313+gr_log_learn_id_change(const char type, const unsigned int real,
57314+ const unsigned int effective, const unsigned int fs)
57315+{
57316+ struct task_struct *task = current;
57317+ const struct cred *cred = current_cred();
57318+
57319+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
57320+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57321+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57322+ type, real, effective, fs, &task->signal->saved_ip);
57323+
57324+ return;
57325+}
57326+
57327+__u32
57328+gr_search_file(const struct dentry * dentry, const __u32 mode,
57329+ const struct vfsmount * mnt)
57330+{
57331+ __u32 retval = mode;
57332+ struct acl_subject_label *curracl;
57333+ struct acl_object_label *currobj;
57334+
57335+ if (unlikely(!(gr_status & GR_READY)))
57336+ return (mode & ~GR_AUDITS);
57337+
57338+ curracl = current->acl;
57339+
57340+ currobj = chk_obj_label(dentry, mnt, curracl);
57341+ retval = currobj->mode & mode;
57342+
57343+ /* if we're opening a specified transfer file for writing
57344+ (e.g. /dev/initctl), then transfer our role to init
57345+ */
57346+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
57347+ current->role->roletype & GR_ROLE_PERSIST)) {
57348+ struct task_struct *task = init_pid_ns.child_reaper;
57349+
57350+ if (task->role != current->role) {
57351+ task->acl_sp_role = 0;
57352+ task->acl_role_id = current->acl_role_id;
57353+ task->role = current->role;
57354+ rcu_read_lock();
57355+ read_lock(&grsec_exec_file_lock);
57356+ gr_apply_subject_to_task(task);
57357+ read_unlock(&grsec_exec_file_lock);
57358+ rcu_read_unlock();
57359+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
57360+ }
57361+ }
57362+
57363+ if (unlikely
57364+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
57365+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
57366+ __u32 new_mode = mode;
57367+
57368+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57369+
57370+ retval = new_mode;
57371+
57372+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
57373+ new_mode |= GR_INHERIT;
57374+
57375+ if (!(mode & GR_NOLEARN))
57376+ gr_log_learn(dentry, mnt, new_mode);
57377+ }
57378+
57379+ return retval;
57380+}
57381+
57382+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
57383+ const struct dentry *parent,
57384+ const struct vfsmount *mnt)
57385+{
57386+ struct name_entry *match;
57387+ struct acl_object_label *matchpo;
57388+ struct acl_subject_label *curracl;
57389+ char *path;
57390+
57391+ if (unlikely(!(gr_status & GR_READY)))
57392+ return NULL;
57393+
57394+ preempt_disable();
57395+ path = gr_to_filename_rbac(new_dentry, mnt);
57396+ match = lookup_name_entry_create(path);
57397+
57398+ curracl = current->acl;
57399+
57400+ if (match) {
57401+ read_lock(&gr_inode_lock);
57402+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
57403+ read_unlock(&gr_inode_lock);
57404+
57405+ if (matchpo) {
57406+ preempt_enable();
57407+ return matchpo;
57408+ }
57409+ }
57410+
57411+ // lookup parent
57412+
57413+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
57414+
57415+ preempt_enable();
57416+ return matchpo;
57417+}
57418+
57419+__u32
57420+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
57421+ const struct vfsmount * mnt, const __u32 mode)
57422+{
57423+ struct acl_object_label *matchpo;
57424+ __u32 retval;
57425+
57426+ if (unlikely(!(gr_status & GR_READY)))
57427+ return (mode & ~GR_AUDITS);
57428+
57429+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
57430+
57431+ retval = matchpo->mode & mode;
57432+
57433+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
57434+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
57435+ __u32 new_mode = mode;
57436+
57437+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57438+
57439+ gr_log_learn(new_dentry, mnt, new_mode);
57440+ return new_mode;
57441+ }
57442+
57443+ return retval;
57444+}
57445+
57446+__u32
57447+gr_check_link(const struct dentry * new_dentry,
57448+ const struct dentry * parent_dentry,
57449+ const struct vfsmount * parent_mnt,
57450+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
57451+{
57452+ struct acl_object_label *obj;
57453+ __u32 oldmode, newmode;
57454+ __u32 needmode;
57455+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
57456+ GR_DELETE | GR_INHERIT;
57457+
57458+ if (unlikely(!(gr_status & GR_READY)))
57459+ return (GR_CREATE | GR_LINK);
57460+
57461+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
57462+ oldmode = obj->mode;
57463+
57464+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
57465+ newmode = obj->mode;
57466+
57467+ needmode = newmode & checkmodes;
57468+
57469+ // old name for hardlink must have at least the permissions of the new name
57470+ if ((oldmode & needmode) != needmode)
57471+ goto bad;
57472+
57473+ // if old name had restrictions/auditing, make sure the new name does as well
57474+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
57475+
57476+ // don't allow hardlinking of suid/sgid files without permission
57477+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
57478+ needmode |= GR_SETID;
57479+
57480+ if ((newmode & needmode) != needmode)
57481+ goto bad;
57482+
57483+ // enforce minimum permissions
57484+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
57485+ return newmode;
57486+bad:
57487+ needmode = oldmode;
57488+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
57489+ needmode |= GR_SETID;
57490+
57491+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
57492+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
57493+ return (GR_CREATE | GR_LINK);
57494+ } else if (newmode & GR_SUPPRESS)
57495+ return GR_SUPPRESS;
57496+ else
57497+ return 0;
57498+}
57499+
57500+int
57501+gr_check_hidden_task(const struct task_struct *task)
57502+{
57503+ if (unlikely(!(gr_status & GR_READY)))
57504+ return 0;
57505+
57506+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
57507+ return 1;
57508+
57509+ return 0;
57510+}
57511+
57512+int
57513+gr_check_protected_task(const struct task_struct *task)
57514+{
57515+ if (unlikely(!(gr_status & GR_READY) || !task))
57516+ return 0;
57517+
57518+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
57519+ task->acl != current->acl)
57520+ return 1;
57521+
57522+ return 0;
57523+}
57524+
57525+int
57526+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57527+{
57528+ struct task_struct *p;
57529+ int ret = 0;
57530+
57531+ if (unlikely(!(gr_status & GR_READY) || !pid))
57532+ return ret;
57533+
57534+ read_lock(&tasklist_lock);
57535+ do_each_pid_task(pid, type, p) {
57536+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
57537+ p->acl != current->acl) {
57538+ ret = 1;
57539+ goto out;
57540+ }
57541+ } while_each_pid_task(pid, type, p);
57542+out:
57543+ read_unlock(&tasklist_lock);
57544+
57545+ return ret;
57546+}
57547+
57548+void
57549+gr_copy_label(struct task_struct *tsk)
57550+{
57551+ tsk->signal->used_accept = 0;
57552+ tsk->acl_sp_role = 0;
57553+ tsk->acl_role_id = current->acl_role_id;
57554+ tsk->acl = current->acl;
57555+ tsk->role = current->role;
57556+ tsk->signal->curr_ip = current->signal->curr_ip;
57557+ tsk->signal->saved_ip = current->signal->saved_ip;
57558+ if (current->exec_file)
57559+ get_file(current->exec_file);
57560+ tsk->exec_file = current->exec_file;
57561+ tsk->is_writable = current->is_writable;
57562+ if (unlikely(current->signal->used_accept)) {
57563+ current->signal->curr_ip = 0;
57564+ current->signal->saved_ip = 0;
57565+ }
57566+
57567+ return;
57568+}
57569+
57570+static void
57571+gr_set_proc_res(struct task_struct *task)
57572+{
57573+ struct acl_subject_label *proc;
57574+ unsigned short i;
57575+
57576+ proc = task->acl;
57577+
57578+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
57579+ return;
57580+
57581+ for (i = 0; i < RLIM_NLIMITS; i++) {
57582+ if (!(proc->resmask & (1 << i)))
57583+ continue;
57584+
57585+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
57586+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
57587+ }
57588+
57589+ return;
57590+}
57591+
57592+extern int __gr_process_user_ban(struct user_struct *user);
57593+
57594+int
57595+gr_check_user_change(int real, int effective, int fs)
57596+{
57597+ unsigned int i;
57598+ __u16 num;
57599+ uid_t *uidlist;
57600+ int curuid;
57601+ int realok = 0;
57602+ int effectiveok = 0;
57603+ int fsok = 0;
57604+
57605+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57606+ struct user_struct *user;
57607+
57608+ if (real == -1)
57609+ goto skipit;
57610+
57611+ user = find_user(real);
57612+ if (user == NULL)
57613+ goto skipit;
57614+
57615+ if (__gr_process_user_ban(user)) {
57616+ /* for find_user */
57617+ free_uid(user);
57618+ return 1;
57619+ }
57620+
57621+ /* for find_user */
57622+ free_uid(user);
57623+
57624+skipit:
57625+#endif
57626+
57627+ if (unlikely(!(gr_status & GR_READY)))
57628+ return 0;
57629+
57630+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57631+ gr_log_learn_id_change('u', real, effective, fs);
57632+
57633+ num = current->acl->user_trans_num;
57634+ uidlist = current->acl->user_transitions;
57635+
57636+ if (uidlist == NULL)
57637+ return 0;
57638+
57639+ if (real == -1)
57640+ realok = 1;
57641+ if (effective == -1)
57642+ effectiveok = 1;
57643+ if (fs == -1)
57644+ fsok = 1;
57645+
57646+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
57647+ for (i = 0; i < num; i++) {
57648+ curuid = (int)uidlist[i];
57649+ if (real == curuid)
57650+ realok = 1;
57651+ if (effective == curuid)
57652+ effectiveok = 1;
57653+ if (fs == curuid)
57654+ fsok = 1;
57655+ }
57656+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
57657+ for (i = 0; i < num; i++) {
57658+ curuid = (int)uidlist[i];
57659+ if (real == curuid)
57660+ break;
57661+ if (effective == curuid)
57662+ break;
57663+ if (fs == curuid)
57664+ break;
57665+ }
57666+ /* not in deny list */
57667+ if (i == num) {
57668+ realok = 1;
57669+ effectiveok = 1;
57670+ fsok = 1;
57671+ }
57672+ }
57673+
57674+ if (realok && effectiveok && fsok)
57675+ return 0;
57676+ else {
57677+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
57678+ return 1;
57679+ }
57680+}
57681+
57682+int
57683+gr_check_group_change(int real, int effective, int fs)
57684+{
57685+ unsigned int i;
57686+ __u16 num;
57687+ gid_t *gidlist;
57688+ int curgid;
57689+ int realok = 0;
57690+ int effectiveok = 0;
57691+ int fsok = 0;
57692+
57693+ if (unlikely(!(gr_status & GR_READY)))
57694+ return 0;
57695+
57696+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57697+ gr_log_learn_id_change('g', real, effective, fs);
57698+
57699+ num = current->acl->group_trans_num;
57700+ gidlist = current->acl->group_transitions;
57701+
57702+ if (gidlist == NULL)
57703+ return 0;
57704+
57705+ if (real == -1)
57706+ realok = 1;
57707+ if (effective == -1)
57708+ effectiveok = 1;
57709+ if (fs == -1)
57710+ fsok = 1;
57711+
57712+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
57713+ for (i = 0; i < num; i++) {
57714+ curgid = (int)gidlist[i];
57715+ if (real == curgid)
57716+ realok = 1;
57717+ if (effective == curgid)
57718+ effectiveok = 1;
57719+ if (fs == curgid)
57720+ fsok = 1;
57721+ }
57722+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
57723+ for (i = 0; i < num; i++) {
57724+ curgid = (int)gidlist[i];
57725+ if (real == curgid)
57726+ break;
57727+ if (effective == curgid)
57728+ break;
57729+ if (fs == curgid)
57730+ break;
57731+ }
57732+ /* not in deny list */
57733+ if (i == num) {
57734+ realok = 1;
57735+ effectiveok = 1;
57736+ fsok = 1;
57737+ }
57738+ }
57739+
57740+ if (realok && effectiveok && fsok)
57741+ return 0;
57742+ else {
57743+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
57744+ return 1;
57745+ }
57746+}
57747+
57748+void
57749+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
57750+{
57751+ struct acl_role_label *role = task->role;
57752+ struct acl_subject_label *subj = NULL;
57753+ struct acl_object_label *obj;
57754+ struct file *filp;
57755+
57756+ if (unlikely(!(gr_status & GR_READY)))
57757+ return;
57758+
57759+ filp = task->exec_file;
57760+
57761+ /* kernel process, we'll give them the kernel role */
57762+ if (unlikely(!filp)) {
57763+ task->role = kernel_role;
57764+ task->acl = kernel_role->root_label;
57765+ return;
57766+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
57767+ role = lookup_acl_role_label(task, uid, gid);
57768+
57769+ /* perform subject lookup in possibly new role
57770+ we can use this result below in the case where role == task->role
57771+ */
57772+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
57773+
57774+ /* if we changed uid/gid, but result in the same role
57775+ and are using inheritance, don't lose the inherited subject
57776+ if current subject is other than what normal lookup
57777+ would result in, we arrived via inheritance, don't
57778+ lose subject
57779+ */
57780+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
57781+ (subj == task->acl)))
57782+ task->acl = subj;
57783+
57784+ task->role = role;
57785+
57786+ task->is_writable = 0;
57787+
57788+ /* ignore additional mmap checks for processes that are writable
57789+ by the default ACL */
57790+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
57791+ if (unlikely(obj->mode & GR_WRITE))
57792+ task->is_writable = 1;
57793+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
57794+ if (unlikely(obj->mode & GR_WRITE))
57795+ task->is_writable = 1;
57796+
57797+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57798+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
57799+#endif
57800+
57801+ gr_set_proc_res(task);
57802+
57803+ return;
57804+}
57805+
57806+int
57807+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57808+ const int unsafe_share)
57809+{
57810+ struct task_struct *task = current;
57811+ struct acl_subject_label *newacl;
57812+ struct acl_object_label *obj;
57813+ __u32 retmode;
57814+
57815+ if (unlikely(!(gr_status & GR_READY)))
57816+ return 0;
57817+
57818+ newacl = chk_subj_label(dentry, mnt, task->role);
57819+
57820+ task_lock(task);
57821+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
57822+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
57823+ !(task->role->roletype & GR_ROLE_GOD) &&
57824+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
57825+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
57826+ task_unlock(task);
57827+ if (unsafe_share)
57828+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
57829+ else
57830+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
57831+ return -EACCES;
57832+ }
57833+ task_unlock(task);
57834+
57835+ obj = chk_obj_label(dentry, mnt, task->acl);
57836+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
57837+
57838+ if (!(task->acl->mode & GR_INHERITLEARN) &&
57839+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
57840+ if (obj->nested)
57841+ task->acl = obj->nested;
57842+ else
57843+ task->acl = newacl;
57844+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
57845+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
57846+
57847+ task->is_writable = 0;
57848+
57849+ /* ignore additional mmap checks for processes that are writable
57850+ by the default ACL */
57851+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
57852+ if (unlikely(obj->mode & GR_WRITE))
57853+ task->is_writable = 1;
57854+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
57855+ if (unlikely(obj->mode & GR_WRITE))
57856+ task->is_writable = 1;
57857+
57858+ gr_set_proc_res(task);
57859+
57860+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57861+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
57862+#endif
57863+ return 0;
57864+}
57865+
57866+/* always called with valid inodev ptr */
57867+static void
57868+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
57869+{
57870+ struct acl_object_label *matchpo;
57871+ struct acl_subject_label *matchps;
57872+ struct acl_subject_label *subj;
57873+ struct acl_role_label *role;
57874+ unsigned int x;
57875+
57876+ FOR_EACH_ROLE_START(role)
57877+ FOR_EACH_SUBJECT_START(role, subj, x)
57878+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
57879+ matchpo->mode |= GR_DELETED;
57880+ FOR_EACH_SUBJECT_END(subj,x)
57881+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
57882+ if (subj->inode == ino && subj->device == dev)
57883+ subj->mode |= GR_DELETED;
57884+ FOR_EACH_NESTED_SUBJECT_END(subj)
57885+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
57886+ matchps->mode |= GR_DELETED;
57887+ FOR_EACH_ROLE_END(role)
57888+
57889+ inodev->nentry->deleted = 1;
57890+
57891+ return;
57892+}
57893+
57894+void
57895+gr_handle_delete(const ino_t ino, const dev_t dev)
57896+{
57897+ struct inodev_entry *inodev;
57898+
57899+ if (unlikely(!(gr_status & GR_READY)))
57900+ return;
57901+
57902+ write_lock(&gr_inode_lock);
57903+ inodev = lookup_inodev_entry(ino, dev);
57904+ if (inodev != NULL)
57905+ do_handle_delete(inodev, ino, dev);
57906+ write_unlock(&gr_inode_lock);
57907+
57908+ return;
57909+}
57910+
57911+static void
57912+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
57913+ const ino_t newinode, const dev_t newdevice,
57914+ struct acl_subject_label *subj)
57915+{
57916+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
57917+ struct acl_object_label *match;
57918+
57919+ match = subj->obj_hash[index];
57920+
57921+ while (match && (match->inode != oldinode ||
57922+ match->device != olddevice ||
57923+ !(match->mode & GR_DELETED)))
57924+ match = match->next;
57925+
57926+ if (match && (match->inode == oldinode)
57927+ && (match->device == olddevice)
57928+ && (match->mode & GR_DELETED)) {
57929+ if (match->prev == NULL) {
57930+ subj->obj_hash[index] = match->next;
57931+ if (match->next != NULL)
57932+ match->next->prev = NULL;
57933+ } else {
57934+ match->prev->next = match->next;
57935+ if (match->next != NULL)
57936+ match->next->prev = match->prev;
57937+ }
57938+ match->prev = NULL;
57939+ match->next = NULL;
57940+ match->inode = newinode;
57941+ match->device = newdevice;
57942+ match->mode &= ~GR_DELETED;
57943+
57944+ insert_acl_obj_label(match, subj);
57945+ }
57946+
57947+ return;
57948+}
57949+
57950+static void
57951+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
57952+ const ino_t newinode, const dev_t newdevice,
57953+ struct acl_role_label *role)
57954+{
57955+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
57956+ struct acl_subject_label *match;
57957+
57958+ match = role->subj_hash[index];
57959+
57960+ while (match && (match->inode != oldinode ||
57961+ match->device != olddevice ||
57962+ !(match->mode & GR_DELETED)))
57963+ match = match->next;
57964+
57965+ if (match && (match->inode == oldinode)
57966+ && (match->device == olddevice)
57967+ && (match->mode & GR_DELETED)) {
57968+ if (match->prev == NULL) {
57969+ role->subj_hash[index] = match->next;
57970+ if (match->next != NULL)
57971+ match->next->prev = NULL;
57972+ } else {
57973+ match->prev->next = match->next;
57974+ if (match->next != NULL)
57975+ match->next->prev = match->prev;
57976+ }
57977+ match->prev = NULL;
57978+ match->next = NULL;
57979+ match->inode = newinode;
57980+ match->device = newdevice;
57981+ match->mode &= ~GR_DELETED;
57982+
57983+ insert_acl_subj_label(match, role);
57984+ }
57985+
57986+ return;
57987+}
57988+
57989+static void
57990+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
57991+ const ino_t newinode, const dev_t newdevice)
57992+{
57993+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
57994+ struct inodev_entry *match;
57995+
57996+ match = inodev_set.i_hash[index];
57997+
57998+ while (match && (match->nentry->inode != oldinode ||
57999+ match->nentry->device != olddevice || !match->nentry->deleted))
58000+ match = match->next;
58001+
58002+ if (match && (match->nentry->inode == oldinode)
58003+ && (match->nentry->device == olddevice) &&
58004+ match->nentry->deleted) {
58005+ if (match->prev == NULL) {
58006+ inodev_set.i_hash[index] = match->next;
58007+ if (match->next != NULL)
58008+ match->next->prev = NULL;
58009+ } else {
58010+ match->prev->next = match->next;
58011+ if (match->next != NULL)
58012+ match->next->prev = match->prev;
58013+ }
58014+ match->prev = NULL;
58015+ match->next = NULL;
58016+ match->nentry->inode = newinode;
58017+ match->nentry->device = newdevice;
58018+ match->nentry->deleted = 0;
58019+
58020+ insert_inodev_entry(match);
58021+ }
58022+
58023+ return;
58024+}
58025+
58026+static void
58027+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58028+{
58029+ struct acl_subject_label *subj;
58030+ struct acl_role_label *role;
58031+ unsigned int x;
58032+
58033+ FOR_EACH_ROLE_START(role)
58034+ update_acl_subj_label(matchn->inode, matchn->device,
58035+ inode, dev, role);
58036+
58037+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58038+ if ((subj->inode == inode) && (subj->device == dev)) {
58039+ subj->inode = inode;
58040+ subj->device = dev;
58041+ }
58042+ FOR_EACH_NESTED_SUBJECT_END(subj)
58043+ FOR_EACH_SUBJECT_START(role, subj, x)
58044+ update_acl_obj_label(matchn->inode, matchn->device,
58045+ inode, dev, subj);
58046+ FOR_EACH_SUBJECT_END(subj,x)
58047+ FOR_EACH_ROLE_END(role)
58048+
58049+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58050+
58051+ return;
58052+}
58053+
58054+static void
58055+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58056+ const struct vfsmount *mnt)
58057+{
58058+ ino_t ino = dentry->d_inode->i_ino;
58059+ dev_t dev = __get_dev(dentry);
58060+
58061+ __do_handle_create(matchn, ino, dev);
58062+
58063+ return;
58064+}
58065+
58066+void
58067+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58068+{
58069+ struct name_entry *matchn;
58070+
58071+ if (unlikely(!(gr_status & GR_READY)))
58072+ return;
58073+
58074+ preempt_disable();
58075+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58076+
58077+ if (unlikely((unsigned long)matchn)) {
58078+ write_lock(&gr_inode_lock);
58079+ do_handle_create(matchn, dentry, mnt);
58080+ write_unlock(&gr_inode_lock);
58081+ }
58082+ preempt_enable();
58083+
58084+ return;
58085+}
58086+
58087+void
58088+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58089+{
58090+ struct name_entry *matchn;
58091+
58092+ if (unlikely(!(gr_status & GR_READY)))
58093+ return;
58094+
58095+ preempt_disable();
58096+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58097+
58098+ if (unlikely((unsigned long)matchn)) {
58099+ write_lock(&gr_inode_lock);
58100+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58101+ write_unlock(&gr_inode_lock);
58102+ }
58103+ preempt_enable();
58104+
58105+ return;
58106+}
58107+
58108+void
58109+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58110+ struct dentry *old_dentry,
58111+ struct dentry *new_dentry,
58112+ struct vfsmount *mnt, const __u8 replace)
58113+{
58114+ struct name_entry *matchn;
58115+ struct inodev_entry *inodev;
58116+ struct inode *inode = new_dentry->d_inode;
58117+ ino_t oldinode = old_dentry->d_inode->i_ino;
58118+ dev_t olddev = __get_dev(old_dentry);
58119+
58120+ /* vfs_rename swaps the name and parent link for old_dentry and
58121+ new_dentry
58122+ at this point, old_dentry has the new name, parent link, and inode
58123+ for the renamed file
58124+ if a file is being replaced by a rename, new_dentry has the inode
58125+ and name for the replaced file
58126+ */
58127+
58128+ if (unlikely(!(gr_status & GR_READY)))
58129+ return;
58130+
58131+ preempt_disable();
58132+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58133+
58134+ /* we wouldn't have to check d_inode if it weren't for
58135+ NFS silly-renaming
58136+ */
58137+
58138+ write_lock(&gr_inode_lock);
58139+ if (unlikely(replace && inode)) {
58140+ ino_t newinode = inode->i_ino;
58141+ dev_t newdev = __get_dev(new_dentry);
58142+ inodev = lookup_inodev_entry(newinode, newdev);
58143+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58144+ do_handle_delete(inodev, newinode, newdev);
58145+ }
58146+
58147+ inodev = lookup_inodev_entry(oldinode, olddev);
58148+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58149+ do_handle_delete(inodev, oldinode, olddev);
58150+
58151+ if (unlikely((unsigned long)matchn))
58152+ do_handle_create(matchn, old_dentry, mnt);
58153+
58154+ write_unlock(&gr_inode_lock);
58155+ preempt_enable();
58156+
58157+ return;
58158+}
58159+
58160+static int
58161+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
58162+ unsigned char **sum)
58163+{
58164+ struct acl_role_label *r;
58165+ struct role_allowed_ip *ipp;
58166+ struct role_transition *trans;
58167+ unsigned int i;
58168+ int found = 0;
58169+ u32 curr_ip = current->signal->curr_ip;
58170+
58171+ current->signal->saved_ip = curr_ip;
58172+
58173+ /* check transition table */
58174+
58175+ for (trans = current->role->transitions; trans; trans = trans->next) {
58176+ if (!strcmp(rolename, trans->rolename)) {
58177+ found = 1;
58178+ break;
58179+ }
58180+ }
58181+
58182+ if (!found)
58183+ return 0;
58184+
58185+ /* handle special roles that do not require authentication
58186+ and check ip */
58187+
58188+ FOR_EACH_ROLE_START(r)
58189+ if (!strcmp(rolename, r->rolename) &&
58190+ (r->roletype & GR_ROLE_SPECIAL)) {
58191+ found = 0;
58192+ if (r->allowed_ips != NULL) {
58193+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
58194+ if ((ntohl(curr_ip) & ipp->netmask) ==
58195+ (ntohl(ipp->addr) & ipp->netmask))
58196+ found = 1;
58197+ }
58198+ } else
58199+ found = 2;
58200+ if (!found)
58201+ return 0;
58202+
58203+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
58204+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
58205+ *salt = NULL;
58206+ *sum = NULL;
58207+ return 1;
58208+ }
58209+ }
58210+ FOR_EACH_ROLE_END(r)
58211+
58212+ for (i = 0; i < num_sprole_pws; i++) {
58213+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
58214+ *salt = acl_special_roles[i]->salt;
58215+ *sum = acl_special_roles[i]->sum;
58216+ return 1;
58217+ }
58218+ }
58219+
58220+ return 0;
58221+}
58222+
58223+static void
58224+assign_special_role(char *rolename)
58225+{
58226+ struct acl_object_label *obj;
58227+ struct acl_role_label *r;
58228+ struct acl_role_label *assigned = NULL;
58229+ struct task_struct *tsk;
58230+ struct file *filp;
58231+
58232+ FOR_EACH_ROLE_START(r)
58233+ if (!strcmp(rolename, r->rolename) &&
58234+ (r->roletype & GR_ROLE_SPECIAL)) {
58235+ assigned = r;
58236+ break;
58237+ }
58238+ FOR_EACH_ROLE_END(r)
58239+
58240+ if (!assigned)
58241+ return;
58242+
58243+ read_lock(&tasklist_lock);
58244+ read_lock(&grsec_exec_file_lock);
58245+
58246+ tsk = current->real_parent;
58247+ if (tsk == NULL)
58248+ goto out_unlock;
58249+
58250+ filp = tsk->exec_file;
58251+ if (filp == NULL)
58252+ goto out_unlock;
58253+
58254+ tsk->is_writable = 0;
58255+
58256+ tsk->acl_sp_role = 1;
58257+ tsk->acl_role_id = ++acl_sp_role_value;
58258+ tsk->role = assigned;
58259+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
58260+
58261+ /* ignore additional mmap checks for processes that are writable
58262+ by the default ACL */
58263+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58264+ if (unlikely(obj->mode & GR_WRITE))
58265+ tsk->is_writable = 1;
58266+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
58267+ if (unlikely(obj->mode & GR_WRITE))
58268+ tsk->is_writable = 1;
58269+
58270+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58271+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
58272+#endif
58273+
58274+out_unlock:
58275+ read_unlock(&grsec_exec_file_lock);
58276+ read_unlock(&tasklist_lock);
58277+ return;
58278+}
58279+
58280+int gr_check_secure_terminal(struct task_struct *task)
58281+{
58282+ struct task_struct *p, *p2, *p3;
58283+ struct files_struct *files;
58284+ struct fdtable *fdt;
58285+ struct file *our_file = NULL, *file;
58286+ int i;
58287+
58288+ if (task->signal->tty == NULL)
58289+ return 1;
58290+
58291+ files = get_files_struct(task);
58292+ if (files != NULL) {
58293+ rcu_read_lock();
58294+ fdt = files_fdtable(files);
58295+ for (i=0; i < fdt->max_fds; i++) {
58296+ file = fcheck_files(files, i);
58297+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
58298+ get_file(file);
58299+ our_file = file;
58300+ }
58301+ }
58302+ rcu_read_unlock();
58303+ put_files_struct(files);
58304+ }
58305+
58306+ if (our_file == NULL)
58307+ return 1;
58308+
58309+ read_lock(&tasklist_lock);
58310+ do_each_thread(p2, p) {
58311+ files = get_files_struct(p);
58312+ if (files == NULL ||
58313+ (p->signal && p->signal->tty == task->signal->tty)) {
58314+ if (files != NULL)
58315+ put_files_struct(files);
58316+ continue;
58317+ }
58318+ rcu_read_lock();
58319+ fdt = files_fdtable(files);
58320+ for (i=0; i < fdt->max_fds; i++) {
58321+ file = fcheck_files(files, i);
58322+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
58323+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
58324+ p3 = task;
58325+ while (p3->pid > 0) {
58326+ if (p3 == p)
58327+ break;
58328+ p3 = p3->real_parent;
58329+ }
58330+ if (p3 == p)
58331+ break;
58332+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
58333+ gr_handle_alertkill(p);
58334+ rcu_read_unlock();
58335+ put_files_struct(files);
58336+ read_unlock(&tasklist_lock);
58337+ fput(our_file);
58338+ return 0;
58339+ }
58340+ }
58341+ rcu_read_unlock();
58342+ put_files_struct(files);
58343+ } while_each_thread(p2, p);
58344+ read_unlock(&tasklist_lock);
58345+
58346+ fput(our_file);
58347+ return 1;
58348+}
58349+
58350+ssize_t
58351+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
58352+{
58353+ struct gr_arg_wrapper uwrap;
58354+ unsigned char *sprole_salt = NULL;
58355+ unsigned char *sprole_sum = NULL;
58356+ int error = sizeof (struct gr_arg_wrapper);
58357+ int error2 = 0;
58358+
58359+ mutex_lock(&gr_dev_mutex);
58360+
58361+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
58362+ error = -EPERM;
58363+ goto out;
58364+ }
58365+
58366+ if (count != sizeof (struct gr_arg_wrapper)) {
58367+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
58368+ error = -EINVAL;
58369+ goto out;
58370+ }
58371+
58372+
58373+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
58374+ gr_auth_expires = 0;
58375+ gr_auth_attempts = 0;
58376+ }
58377+
58378+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
58379+ error = -EFAULT;
58380+ goto out;
58381+ }
58382+
58383+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
58384+ error = -EINVAL;
58385+ goto out;
58386+ }
58387+
58388+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
58389+ error = -EFAULT;
58390+ goto out;
58391+ }
58392+
58393+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58394+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58395+ time_after(gr_auth_expires, get_seconds())) {
58396+ error = -EBUSY;
58397+ goto out;
58398+ }
58399+
58400+ /* if non-root trying to do anything other than use a special role,
58401+ do not attempt authentication, do not count towards authentication
58402+ locking
58403+ */
58404+
58405+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
58406+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58407+ current_uid()) {
58408+ error = -EPERM;
58409+ goto out;
58410+ }
58411+
58412+ /* ensure pw and special role name are null terminated */
58413+
58414+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
58415+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
58416+
58417+ /* Okay.
58418+ * We have our enough of the argument structure..(we have yet
58419+ * to copy_from_user the tables themselves) . Copy the tables
58420+ * only if we need them, i.e. for loading operations. */
58421+
58422+ switch (gr_usermode->mode) {
58423+ case GR_STATUS:
58424+ if (gr_status & GR_READY) {
58425+ error = 1;
58426+ if (!gr_check_secure_terminal(current))
58427+ error = 3;
58428+ } else
58429+ error = 2;
58430+ goto out;
58431+ case GR_SHUTDOWN:
58432+ if ((gr_status & GR_READY)
58433+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58434+ pax_open_kernel();
58435+ gr_status &= ~GR_READY;
58436+ pax_close_kernel();
58437+
58438+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
58439+ free_variables();
58440+ memset(gr_usermode, 0, sizeof (struct gr_arg));
58441+ memset(gr_system_salt, 0, GR_SALT_LEN);
58442+ memset(gr_system_sum, 0, GR_SHA_LEN);
58443+ } else if (gr_status & GR_READY) {
58444+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
58445+ error = -EPERM;
58446+ } else {
58447+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
58448+ error = -EAGAIN;
58449+ }
58450+ break;
58451+ case GR_ENABLE:
58452+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
58453+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
58454+ else {
58455+ if (gr_status & GR_READY)
58456+ error = -EAGAIN;
58457+ else
58458+ error = error2;
58459+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
58460+ }
58461+ break;
58462+ case GR_RELOAD:
58463+ if (!(gr_status & GR_READY)) {
58464+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
58465+ error = -EAGAIN;
58466+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58467+ lock_kernel();
58468+
58469+ pax_open_kernel();
58470+ gr_status &= ~GR_READY;
58471+ pax_close_kernel();
58472+
58473+ free_variables();
58474+ if (!(error2 = gracl_init(gr_usermode))) {
58475+ unlock_kernel();
58476+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
58477+ } else {
58478+ unlock_kernel();
58479+ error = error2;
58480+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
58481+ }
58482+ } else {
58483+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
58484+ error = -EPERM;
58485+ }
58486+ break;
58487+ case GR_SEGVMOD:
58488+ if (unlikely(!(gr_status & GR_READY))) {
58489+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
58490+ error = -EAGAIN;
58491+ break;
58492+ }
58493+
58494+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58495+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
58496+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
58497+ struct acl_subject_label *segvacl;
58498+ segvacl =
58499+ lookup_acl_subj_label(gr_usermode->segv_inode,
58500+ gr_usermode->segv_device,
58501+ current->role);
58502+ if (segvacl) {
58503+ segvacl->crashes = 0;
58504+ segvacl->expires = 0;
58505+ }
58506+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
58507+ gr_remove_uid(gr_usermode->segv_uid);
58508+ }
58509+ } else {
58510+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
58511+ error = -EPERM;
58512+ }
58513+ break;
58514+ case GR_SPROLE:
58515+ case GR_SPROLEPAM:
58516+ if (unlikely(!(gr_status & GR_READY))) {
58517+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
58518+ error = -EAGAIN;
58519+ break;
58520+ }
58521+
58522+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
58523+ current->role->expires = 0;
58524+ current->role->auth_attempts = 0;
58525+ }
58526+
58527+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58528+ time_after(current->role->expires, get_seconds())) {
58529+ error = -EBUSY;
58530+ goto out;
58531+ }
58532+
58533+ if (lookup_special_role_auth
58534+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
58535+ && ((!sprole_salt && !sprole_sum)
58536+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
58537+ char *p = "";
58538+ assign_special_role(gr_usermode->sp_role);
58539+ read_lock(&tasklist_lock);
58540+ if (current->real_parent)
58541+ p = current->real_parent->role->rolename;
58542+ read_unlock(&tasklist_lock);
58543+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
58544+ p, acl_sp_role_value);
58545+ } else {
58546+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
58547+ error = -EPERM;
58548+ if(!(current->role->auth_attempts++))
58549+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
58550+
58551+ goto out;
58552+ }
58553+ break;
58554+ case GR_UNSPROLE:
58555+ if (unlikely(!(gr_status & GR_READY))) {
58556+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
58557+ error = -EAGAIN;
58558+ break;
58559+ }
58560+
58561+ if (current->role->roletype & GR_ROLE_SPECIAL) {
58562+ char *p = "";
58563+ int i = 0;
58564+
58565+ read_lock(&tasklist_lock);
58566+ if (current->real_parent) {
58567+ p = current->real_parent->role->rolename;
58568+ i = current->real_parent->acl_role_id;
58569+ }
58570+ read_unlock(&tasklist_lock);
58571+
58572+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
58573+ gr_set_acls(1);
58574+ } else {
58575+ error = -EPERM;
58576+ goto out;
58577+ }
58578+ break;
58579+ default:
58580+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
58581+ error = -EINVAL;
58582+ break;
58583+ }
58584+
58585+ if (error != -EPERM)
58586+ goto out;
58587+
58588+ if(!(gr_auth_attempts++))
58589+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
58590+
58591+ out:
58592+ mutex_unlock(&gr_dev_mutex);
58593+ return error;
58594+}
58595+
58596+/* must be called with
58597+ rcu_read_lock();
58598+ read_lock(&tasklist_lock);
58599+ read_lock(&grsec_exec_file_lock);
58600+*/
58601+int gr_apply_subject_to_task(struct task_struct *task)
58602+{
58603+ struct acl_object_label *obj;
58604+ char *tmpname;
58605+ struct acl_subject_label *tmpsubj;
58606+ struct file *filp;
58607+ struct name_entry *nmatch;
58608+
58609+ filp = task->exec_file;
58610+ if (filp == NULL)
58611+ return 0;
58612+
58613+ /* the following is to apply the correct subject
58614+ on binaries running when the RBAC system
58615+ is enabled, when the binaries have been
58616+ replaced or deleted since their execution
58617+ -----
58618+ when the RBAC system starts, the inode/dev
58619+ from exec_file will be one the RBAC system
58620+ is unaware of. It only knows the inode/dev
58621+ of the present file on disk, or the absence
58622+ of it.
58623+ */
58624+ preempt_disable();
58625+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
58626+
58627+ nmatch = lookup_name_entry(tmpname);
58628+ preempt_enable();
58629+ tmpsubj = NULL;
58630+ if (nmatch) {
58631+ if (nmatch->deleted)
58632+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
58633+ else
58634+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
58635+ if (tmpsubj != NULL)
58636+ task->acl = tmpsubj;
58637+ }
58638+ if (tmpsubj == NULL)
58639+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
58640+ task->role);
58641+ if (task->acl) {
58642+ task->is_writable = 0;
58643+ /* ignore additional mmap checks for processes that are writable
58644+ by the default ACL */
58645+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58646+ if (unlikely(obj->mode & GR_WRITE))
58647+ task->is_writable = 1;
58648+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58649+ if (unlikely(obj->mode & GR_WRITE))
58650+ task->is_writable = 1;
58651+
58652+ gr_set_proc_res(task);
58653+
58654+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58655+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58656+#endif
58657+ } else {
58658+ return 1;
58659+ }
58660+
58661+ return 0;
58662+}
58663+
58664+int
58665+gr_set_acls(const int type)
58666+{
58667+ struct task_struct *task, *task2;
58668+ struct acl_role_label *role = current->role;
58669+ __u16 acl_role_id = current->acl_role_id;
58670+ const struct cred *cred;
58671+ int ret;
58672+
58673+ rcu_read_lock();
58674+ read_lock(&tasklist_lock);
58675+ read_lock(&grsec_exec_file_lock);
58676+ do_each_thread(task2, task) {
58677+ /* check to see if we're called from the exit handler,
58678+ if so, only replace ACLs that have inherited the admin
58679+ ACL */
58680+
58681+ if (type && (task->role != role ||
58682+ task->acl_role_id != acl_role_id))
58683+ continue;
58684+
58685+ task->acl_role_id = 0;
58686+ task->acl_sp_role = 0;
58687+
58688+ if (task->exec_file) {
58689+ cred = __task_cred(task);
58690+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
58691+
58692+ ret = gr_apply_subject_to_task(task);
58693+ if (ret) {
58694+ read_unlock(&grsec_exec_file_lock);
58695+ read_unlock(&tasklist_lock);
58696+ rcu_read_unlock();
58697+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
58698+ return ret;
58699+ }
58700+ } else {
58701+ // it's a kernel process
58702+ task->role = kernel_role;
58703+ task->acl = kernel_role->root_label;
58704+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
58705+ task->acl->mode &= ~GR_PROCFIND;
58706+#endif
58707+ }
58708+ } while_each_thread(task2, task);
58709+ read_unlock(&grsec_exec_file_lock);
58710+ read_unlock(&tasklist_lock);
58711+ rcu_read_unlock();
58712+
58713+ return 0;
58714+}
58715+
58716+void
58717+gr_learn_resource(const struct task_struct *task,
58718+ const int res, const unsigned long wanted, const int gt)
58719+{
58720+ struct acl_subject_label *acl;
58721+ const struct cred *cred;
58722+
58723+ if (unlikely((gr_status & GR_READY) &&
58724+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
58725+ goto skip_reslog;
58726+
58727+#ifdef CONFIG_GRKERNSEC_RESLOG
58728+ gr_log_resource(task, res, wanted, gt);
58729+#endif
58730+ skip_reslog:
58731+
58732+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
58733+ return;
58734+
58735+ acl = task->acl;
58736+
58737+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
58738+ !(acl->resmask & (1 << (unsigned short) res))))
58739+ return;
58740+
58741+ if (wanted >= acl->res[res].rlim_cur) {
58742+ unsigned long res_add;
58743+
58744+ res_add = wanted;
58745+ switch (res) {
58746+ case RLIMIT_CPU:
58747+ res_add += GR_RLIM_CPU_BUMP;
58748+ break;
58749+ case RLIMIT_FSIZE:
58750+ res_add += GR_RLIM_FSIZE_BUMP;
58751+ break;
58752+ case RLIMIT_DATA:
58753+ res_add += GR_RLIM_DATA_BUMP;
58754+ break;
58755+ case RLIMIT_STACK:
58756+ res_add += GR_RLIM_STACK_BUMP;
58757+ break;
58758+ case RLIMIT_CORE:
58759+ res_add += GR_RLIM_CORE_BUMP;
58760+ break;
58761+ case RLIMIT_RSS:
58762+ res_add += GR_RLIM_RSS_BUMP;
58763+ break;
58764+ case RLIMIT_NPROC:
58765+ res_add += GR_RLIM_NPROC_BUMP;
58766+ break;
58767+ case RLIMIT_NOFILE:
58768+ res_add += GR_RLIM_NOFILE_BUMP;
58769+ break;
58770+ case RLIMIT_MEMLOCK:
58771+ res_add += GR_RLIM_MEMLOCK_BUMP;
58772+ break;
58773+ case RLIMIT_AS:
58774+ res_add += GR_RLIM_AS_BUMP;
58775+ break;
58776+ case RLIMIT_LOCKS:
58777+ res_add += GR_RLIM_LOCKS_BUMP;
58778+ break;
58779+ case RLIMIT_SIGPENDING:
58780+ res_add += GR_RLIM_SIGPENDING_BUMP;
58781+ break;
58782+ case RLIMIT_MSGQUEUE:
58783+ res_add += GR_RLIM_MSGQUEUE_BUMP;
58784+ break;
58785+ case RLIMIT_NICE:
58786+ res_add += GR_RLIM_NICE_BUMP;
58787+ break;
58788+ case RLIMIT_RTPRIO:
58789+ res_add += GR_RLIM_RTPRIO_BUMP;
58790+ break;
58791+ case RLIMIT_RTTIME:
58792+ res_add += GR_RLIM_RTTIME_BUMP;
58793+ break;
58794+ }
58795+
58796+ acl->res[res].rlim_cur = res_add;
58797+
58798+ if (wanted > acl->res[res].rlim_max)
58799+ acl->res[res].rlim_max = res_add;
58800+
58801+ /* only log the subject filename, since resource logging is supported for
58802+ single-subject learning only */
58803+ rcu_read_lock();
58804+ cred = __task_cred(task);
58805+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
58806+ task->role->roletype, cred->uid, cred->gid, acl->filename,
58807+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
58808+ "", (unsigned long) res, &task->signal->saved_ip);
58809+ rcu_read_unlock();
58810+ }
58811+
58812+ return;
58813+}
58814+
58815+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
58816+void
58817+pax_set_initial_flags(struct linux_binprm *bprm)
58818+{
58819+ struct task_struct *task = current;
58820+ struct acl_subject_label *proc;
58821+ unsigned long flags;
58822+
58823+ if (unlikely(!(gr_status & GR_READY)))
58824+ return;
58825+
58826+ flags = pax_get_flags(task);
58827+
58828+ proc = task->acl;
58829+
58830+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
58831+ flags &= ~MF_PAX_PAGEEXEC;
58832+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
58833+ flags &= ~MF_PAX_SEGMEXEC;
58834+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
58835+ flags &= ~MF_PAX_RANDMMAP;
58836+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
58837+ flags &= ~MF_PAX_EMUTRAMP;
58838+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
58839+ flags &= ~MF_PAX_MPROTECT;
58840+
58841+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
58842+ flags |= MF_PAX_PAGEEXEC;
58843+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
58844+ flags |= MF_PAX_SEGMEXEC;
58845+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
58846+ flags |= MF_PAX_RANDMMAP;
58847+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
58848+ flags |= MF_PAX_EMUTRAMP;
58849+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
58850+ flags |= MF_PAX_MPROTECT;
58851+
58852+ pax_set_flags(task, flags);
58853+
58854+ return;
58855+}
58856+#endif
58857+
58858+#ifdef CONFIG_SYSCTL
58859+/* Eric Biederman likes breaking userland ABI and every inode-based security
58860+ system to save 35kb of memory */
58861+
58862+/* we modify the passed in filename, but adjust it back before returning */
58863+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
58864+{
58865+ struct name_entry *nmatch;
58866+ char *p, *lastp = NULL;
58867+ struct acl_object_label *obj = NULL, *tmp;
58868+ struct acl_subject_label *tmpsubj;
58869+ char c = '\0';
58870+
58871+ read_lock(&gr_inode_lock);
58872+
58873+ p = name + len - 1;
58874+ do {
58875+ nmatch = lookup_name_entry(name);
58876+ if (lastp != NULL)
58877+ *lastp = c;
58878+
58879+ if (nmatch == NULL)
58880+ goto next_component;
58881+ tmpsubj = current->acl;
58882+ do {
58883+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
58884+ if (obj != NULL) {
58885+ tmp = obj->globbed;
58886+ while (tmp) {
58887+ if (!glob_match(tmp->filename, name)) {
58888+ obj = tmp;
58889+ goto found_obj;
58890+ }
58891+ tmp = tmp->next;
58892+ }
58893+ goto found_obj;
58894+ }
58895+ } while ((tmpsubj = tmpsubj->parent_subject));
58896+next_component:
58897+ /* end case */
58898+ if (p == name)
58899+ break;
58900+
58901+ while (*p != '/')
58902+ p--;
58903+ if (p == name)
58904+ lastp = p + 1;
58905+ else {
58906+ lastp = p;
58907+ p--;
58908+ }
58909+ c = *lastp;
58910+ *lastp = '\0';
58911+ } while (1);
58912+found_obj:
58913+ read_unlock(&gr_inode_lock);
58914+ /* obj returned will always be non-null */
58915+ return obj;
58916+}
58917+
58918+/* returns 0 when allowing, non-zero on error
58919+ op of 0 is used for readdir, so we don't log the names of hidden files
58920+*/
58921+__u32
58922+gr_handle_sysctl(const struct ctl_table *table, const int op)
58923+{
58924+ ctl_table *tmp;
58925+ const char *proc_sys = "/proc/sys";
58926+ char *path;
58927+ struct acl_object_label *obj;
58928+ unsigned short len = 0, pos = 0, depth = 0, i;
58929+ __u32 err = 0;
58930+ __u32 mode = 0;
58931+
58932+ if (unlikely(!(gr_status & GR_READY)))
58933+ return 0;
58934+
58935+ /* for now, ignore operations on non-sysctl entries if it's not a
58936+ readdir*/
58937+ if (table->child != NULL && op != 0)
58938+ return 0;
58939+
58940+ mode |= GR_FIND;
58941+ /* it's only a read if it's an entry, read on dirs is for readdir */
58942+ if (op & MAY_READ)
58943+ mode |= GR_READ;
58944+ if (op & MAY_WRITE)
58945+ mode |= GR_WRITE;
58946+
58947+ preempt_disable();
58948+
58949+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
58950+
58951+ /* it's only a read/write if it's an actual entry, not a dir
58952+ (which are opened for readdir)
58953+ */
58954+
58955+ /* convert the requested sysctl entry into a pathname */
58956+
58957+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
58958+ len += strlen(tmp->procname);
58959+ len++;
58960+ depth++;
58961+ }
58962+
58963+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
58964+ /* deny */
58965+ goto out;
58966+ }
58967+
58968+ memset(path, 0, PAGE_SIZE);
58969+
58970+ memcpy(path, proc_sys, strlen(proc_sys));
58971+
58972+ pos += strlen(proc_sys);
58973+
58974+ for (; depth > 0; depth--) {
58975+ path[pos] = '/';
58976+ pos++;
58977+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
58978+ if (depth == i) {
58979+ memcpy(path + pos, tmp->procname,
58980+ strlen(tmp->procname));
58981+ pos += strlen(tmp->procname);
58982+ }
58983+ i++;
58984+ }
58985+ }
58986+
58987+ obj = gr_lookup_by_name(path, pos);
58988+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
58989+
58990+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
58991+ ((err & mode) != mode))) {
58992+ __u32 new_mode = mode;
58993+
58994+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58995+
58996+ err = 0;
58997+ gr_log_learn_sysctl(path, new_mode);
58998+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
58999+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
59000+ err = -ENOENT;
59001+ } else if (!(err & GR_FIND)) {
59002+ err = -ENOENT;
59003+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59004+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59005+ path, (mode & GR_READ) ? " reading" : "",
59006+ (mode & GR_WRITE) ? " writing" : "");
59007+ err = -EACCES;
59008+ } else if ((err & mode) != mode) {
59009+ err = -EACCES;
59010+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59011+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59012+ path, (mode & GR_READ) ? " reading" : "",
59013+ (mode & GR_WRITE) ? " writing" : "");
59014+ err = 0;
59015+ } else
59016+ err = 0;
59017+
59018+ out:
59019+ preempt_enable();
59020+
59021+ return err;
59022+}
59023+#endif
59024+
59025+int
59026+gr_handle_proc_ptrace(struct task_struct *task)
59027+{
59028+ struct file *filp;
59029+ struct task_struct *tmp = task;
59030+ struct task_struct *curtemp = current;
59031+ __u32 retmode;
59032+
59033+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59034+ if (unlikely(!(gr_status & GR_READY)))
59035+ return 0;
59036+#endif
59037+
59038+ read_lock(&tasklist_lock);
59039+ read_lock(&grsec_exec_file_lock);
59040+ filp = task->exec_file;
59041+
59042+ while (tmp->pid > 0) {
59043+ if (tmp == curtemp)
59044+ break;
59045+ tmp = tmp->real_parent;
59046+ }
59047+
59048+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59049+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59050+ read_unlock(&grsec_exec_file_lock);
59051+ read_unlock(&tasklist_lock);
59052+ return 1;
59053+ }
59054+
59055+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59056+ if (!(gr_status & GR_READY)) {
59057+ read_unlock(&grsec_exec_file_lock);
59058+ read_unlock(&tasklist_lock);
59059+ return 0;
59060+ }
59061+#endif
59062+
59063+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59064+ read_unlock(&grsec_exec_file_lock);
59065+ read_unlock(&tasklist_lock);
59066+
59067+ if (retmode & GR_NOPTRACE)
59068+ return 1;
59069+
59070+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59071+ && (current->acl != task->acl || (current->acl != current->role->root_label
59072+ && current->pid != task->pid)))
59073+ return 1;
59074+
59075+ return 0;
59076+}
59077+
59078+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59079+{
59080+ if (unlikely(!(gr_status & GR_READY)))
59081+ return;
59082+
59083+ if (!(current->role->roletype & GR_ROLE_GOD))
59084+ return;
59085+
59086+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59087+ p->role->rolename, gr_task_roletype_to_char(p),
59088+ p->acl->filename);
59089+}
59090+
59091+int
59092+gr_handle_ptrace(struct task_struct *task, const long request)
59093+{
59094+ struct task_struct *tmp = task;
59095+ struct task_struct *curtemp = current;
59096+ __u32 retmode;
59097+
59098+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59099+ if (unlikely(!(gr_status & GR_READY)))
59100+ return 0;
59101+#endif
59102+
59103+ read_lock(&tasklist_lock);
59104+ while (tmp->pid > 0) {
59105+ if (tmp == curtemp)
59106+ break;
59107+ tmp = tmp->real_parent;
59108+ }
59109+
59110+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59111+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59112+ read_unlock(&tasklist_lock);
59113+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59114+ return 1;
59115+ }
59116+ read_unlock(&tasklist_lock);
59117+
59118+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59119+ if (!(gr_status & GR_READY))
59120+ return 0;
59121+#endif
59122+
59123+ read_lock(&grsec_exec_file_lock);
59124+ if (unlikely(!task->exec_file)) {
59125+ read_unlock(&grsec_exec_file_lock);
59126+ return 0;
59127+ }
59128+
59129+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59130+ read_unlock(&grsec_exec_file_lock);
59131+
59132+ if (retmode & GR_NOPTRACE) {
59133+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59134+ return 1;
59135+ }
59136+
59137+ if (retmode & GR_PTRACERD) {
59138+ switch (request) {
59139+ case PTRACE_POKETEXT:
59140+ case PTRACE_POKEDATA:
59141+ case PTRACE_POKEUSR:
59142+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59143+ case PTRACE_SETREGS:
59144+ case PTRACE_SETFPREGS:
59145+#endif
59146+#ifdef CONFIG_X86
59147+ case PTRACE_SETFPXREGS:
59148+#endif
59149+#ifdef CONFIG_ALTIVEC
59150+ case PTRACE_SETVRREGS:
59151+#endif
59152+ return 1;
59153+ default:
59154+ return 0;
59155+ }
59156+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
59157+ !(current->role->roletype & GR_ROLE_GOD) &&
59158+ (current->acl != task->acl)) {
59159+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59160+ return 1;
59161+ }
59162+
59163+ return 0;
59164+}
59165+
59166+static int is_writable_mmap(const struct file *filp)
59167+{
59168+ struct task_struct *task = current;
59169+ struct acl_object_label *obj, *obj2;
59170+
59171+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
59172+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
59173+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59174+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
59175+ task->role->root_label);
59176+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
59177+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
59178+ return 1;
59179+ }
59180+ }
59181+ return 0;
59182+}
59183+
59184+int
59185+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
59186+{
59187+ __u32 mode;
59188+
59189+ if (unlikely(!file || !(prot & PROT_EXEC)))
59190+ return 1;
59191+
59192+ if (is_writable_mmap(file))
59193+ return 0;
59194+
59195+ mode =
59196+ gr_search_file(file->f_path.dentry,
59197+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59198+ file->f_path.mnt);
59199+
59200+ if (!gr_tpe_allow(file))
59201+ return 0;
59202+
59203+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59204+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59205+ return 0;
59206+ } else if (unlikely(!(mode & GR_EXEC))) {
59207+ return 0;
59208+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59209+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59210+ return 1;
59211+ }
59212+
59213+ return 1;
59214+}
59215+
59216+int
59217+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59218+{
59219+ __u32 mode;
59220+
59221+ if (unlikely(!file || !(prot & PROT_EXEC)))
59222+ return 1;
59223+
59224+ if (is_writable_mmap(file))
59225+ return 0;
59226+
59227+ mode =
59228+ gr_search_file(file->f_path.dentry,
59229+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59230+ file->f_path.mnt);
59231+
59232+ if (!gr_tpe_allow(file))
59233+ return 0;
59234+
59235+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59236+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59237+ return 0;
59238+ } else if (unlikely(!(mode & GR_EXEC))) {
59239+ return 0;
59240+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59241+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59242+ return 1;
59243+ }
59244+
59245+ return 1;
59246+}
59247+
59248+void
59249+gr_acl_handle_psacct(struct task_struct *task, const long code)
59250+{
59251+ unsigned long runtime;
59252+ unsigned long cputime;
59253+ unsigned int wday, cday;
59254+ __u8 whr, chr;
59255+ __u8 wmin, cmin;
59256+ __u8 wsec, csec;
59257+ struct timespec timeval;
59258+
59259+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
59260+ !(task->acl->mode & GR_PROCACCT)))
59261+ return;
59262+
59263+ do_posix_clock_monotonic_gettime(&timeval);
59264+ runtime = timeval.tv_sec - task->start_time.tv_sec;
59265+ wday = runtime / (3600 * 24);
59266+ runtime -= wday * (3600 * 24);
59267+ whr = runtime / 3600;
59268+ runtime -= whr * 3600;
59269+ wmin = runtime / 60;
59270+ runtime -= wmin * 60;
59271+ wsec = runtime;
59272+
59273+ cputime = (task->utime + task->stime) / HZ;
59274+ cday = cputime / (3600 * 24);
59275+ cputime -= cday * (3600 * 24);
59276+ chr = cputime / 3600;
59277+ cputime -= chr * 3600;
59278+ cmin = cputime / 60;
59279+ cputime -= cmin * 60;
59280+ csec = cputime;
59281+
59282+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
59283+
59284+ return;
59285+}
59286+
59287+void gr_set_kernel_label(struct task_struct *task)
59288+{
59289+ if (gr_status & GR_READY) {
59290+ task->role = kernel_role;
59291+ task->acl = kernel_role->root_label;
59292+ }
59293+ return;
59294+}
59295+
59296+#ifdef CONFIG_TASKSTATS
59297+int gr_is_taskstats_denied(int pid)
59298+{
59299+ struct task_struct *task;
59300+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59301+ const struct cred *cred;
59302+#endif
59303+ int ret = 0;
59304+
59305+ /* restrict taskstats viewing to un-chrooted root users
59306+ who have the 'view' subject flag if the RBAC system is enabled
59307+ */
59308+
59309+ rcu_read_lock();
59310+ read_lock(&tasklist_lock);
59311+ task = find_task_by_vpid(pid);
59312+ if (task) {
59313+#ifdef CONFIG_GRKERNSEC_CHROOT
59314+ if (proc_is_chrooted(task))
59315+ ret = -EACCES;
59316+#endif
59317+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59318+ cred = __task_cred(task);
59319+#ifdef CONFIG_GRKERNSEC_PROC_USER
59320+ if (cred->uid != 0)
59321+ ret = -EACCES;
59322+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59323+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
59324+ ret = -EACCES;
59325+#endif
59326+#endif
59327+ if (gr_status & GR_READY) {
59328+ if (!(task->acl->mode & GR_VIEW))
59329+ ret = -EACCES;
59330+ }
59331+ } else
59332+ ret = -ENOENT;
59333+
59334+ read_unlock(&tasklist_lock);
59335+ rcu_read_unlock();
59336+
59337+ return ret;
59338+}
59339+#endif
59340+
59341+/* AUXV entries are filled via a descendant of search_binary_handler
59342+ after we've already applied the subject for the target
59343+*/
59344+int gr_acl_enable_at_secure(void)
59345+{
59346+ if (unlikely(!(gr_status & GR_READY)))
59347+ return 0;
59348+
59349+ if (current->acl->mode & GR_ATSECURE)
59350+ return 1;
59351+
59352+ return 0;
59353+}
59354+
59355+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
59356+{
59357+ struct task_struct *task = current;
59358+ struct dentry *dentry = file->f_path.dentry;
59359+ struct vfsmount *mnt = file->f_path.mnt;
59360+ struct acl_object_label *obj, *tmp;
59361+ struct acl_subject_label *subj;
59362+ unsigned int bufsize;
59363+ int is_not_root;
59364+ char *path;
59365+ dev_t dev = __get_dev(dentry);
59366+
59367+ if (unlikely(!(gr_status & GR_READY)))
59368+ return 1;
59369+
59370+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59371+ return 1;
59372+
59373+ /* ignore Eric Biederman */
59374+ if (IS_PRIVATE(dentry->d_inode))
59375+ return 1;
59376+
59377+ subj = task->acl;
59378+ do {
59379+ obj = lookup_acl_obj_label(ino, dev, subj);
59380+ if (obj != NULL)
59381+ return (obj->mode & GR_FIND) ? 1 : 0;
59382+ } while ((subj = subj->parent_subject));
59383+
59384+ /* this is purely an optimization since we're looking for an object
59385+ for the directory we're doing a readdir on
59386+ if it's possible for any globbed object to match the entry we're
59387+ filling into the directory, then the object we find here will be
59388+ an anchor point with attached globbed objects
59389+ */
59390+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
59391+ if (obj->globbed == NULL)
59392+ return (obj->mode & GR_FIND) ? 1 : 0;
59393+
59394+ is_not_root = ((obj->filename[0] == '/') &&
59395+ (obj->filename[1] == '\0')) ? 0 : 1;
59396+ bufsize = PAGE_SIZE - namelen - is_not_root;
59397+
59398+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
59399+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
59400+ return 1;
59401+
59402+ preempt_disable();
59403+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
59404+ bufsize);
59405+
59406+ bufsize = strlen(path);
59407+
59408+ /* if base is "/", don't append an additional slash */
59409+ if (is_not_root)
59410+ *(path + bufsize) = '/';
59411+ memcpy(path + bufsize + is_not_root, name, namelen);
59412+ *(path + bufsize + namelen + is_not_root) = '\0';
59413+
59414+ tmp = obj->globbed;
59415+ while (tmp) {
59416+ if (!glob_match(tmp->filename, path)) {
59417+ preempt_enable();
59418+ return (tmp->mode & GR_FIND) ? 1 : 0;
59419+ }
59420+ tmp = tmp->next;
59421+ }
59422+ preempt_enable();
59423+ return (obj->mode & GR_FIND) ? 1 : 0;
59424+}
59425+
59426+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
59427+EXPORT_SYMBOL(gr_acl_is_enabled);
59428+#endif
59429+EXPORT_SYMBOL(gr_learn_resource);
59430+EXPORT_SYMBOL(gr_set_kernel_label);
59431+#ifdef CONFIG_SECURITY
59432+EXPORT_SYMBOL(gr_check_user_change);
59433+EXPORT_SYMBOL(gr_check_group_change);
59434+#endif
59435+
59436diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
59437new file mode 100644
59438index 0000000..34fefda
59439--- /dev/null
59440+++ b/grsecurity/gracl_alloc.c
59441@@ -0,0 +1,105 @@
59442+#include <linux/kernel.h>
59443+#include <linux/mm.h>
59444+#include <linux/slab.h>
59445+#include <linux/vmalloc.h>
59446+#include <linux/gracl.h>
59447+#include <linux/grsecurity.h>
59448+
59449+static unsigned long alloc_stack_next = 1;
59450+static unsigned long alloc_stack_size = 1;
59451+static void **alloc_stack;
59452+
59453+static __inline__ int
59454+alloc_pop(void)
59455+{
59456+ if (alloc_stack_next == 1)
59457+ return 0;
59458+
59459+ kfree(alloc_stack[alloc_stack_next - 2]);
59460+
59461+ alloc_stack_next--;
59462+
59463+ return 1;
59464+}
59465+
59466+static __inline__ int
59467+alloc_push(void *buf)
59468+{
59469+ if (alloc_stack_next >= alloc_stack_size)
59470+ return 1;
59471+
59472+ alloc_stack[alloc_stack_next - 1] = buf;
59473+
59474+ alloc_stack_next++;
59475+
59476+ return 0;
59477+}
59478+
59479+void *
59480+acl_alloc(unsigned long len)
59481+{
59482+ void *ret = NULL;
59483+
59484+ if (!len || len > PAGE_SIZE)
59485+ goto out;
59486+
59487+ ret = kmalloc(len, GFP_KERNEL);
59488+
59489+ if (ret) {
59490+ if (alloc_push(ret)) {
59491+ kfree(ret);
59492+ ret = NULL;
59493+ }
59494+ }
59495+
59496+out:
59497+ return ret;
59498+}
59499+
59500+void *
59501+acl_alloc_num(unsigned long num, unsigned long len)
59502+{
59503+ if (!len || (num > (PAGE_SIZE / len)))
59504+ return NULL;
59505+
59506+ return acl_alloc(num * len);
59507+}
59508+
59509+void
59510+acl_free_all(void)
59511+{
59512+ if (gr_acl_is_enabled() || !alloc_stack)
59513+ return;
59514+
59515+ while (alloc_pop()) ;
59516+
59517+ if (alloc_stack) {
59518+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
59519+ kfree(alloc_stack);
59520+ else
59521+ vfree(alloc_stack);
59522+ }
59523+
59524+ alloc_stack = NULL;
59525+ alloc_stack_size = 1;
59526+ alloc_stack_next = 1;
59527+
59528+ return;
59529+}
59530+
59531+int
59532+acl_alloc_stack_init(unsigned long size)
59533+{
59534+ if ((size * sizeof (void *)) <= PAGE_SIZE)
59535+ alloc_stack =
59536+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
59537+ else
59538+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
59539+
59540+ alloc_stack_size = size;
59541+
59542+ if (!alloc_stack)
59543+ return 0;
59544+ else
59545+ return 1;
59546+}
59547diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
59548new file mode 100644
59549index 0000000..955ddfb
59550--- /dev/null
59551+++ b/grsecurity/gracl_cap.c
59552@@ -0,0 +1,101 @@
59553+#include <linux/kernel.h>
59554+#include <linux/module.h>
59555+#include <linux/sched.h>
59556+#include <linux/gracl.h>
59557+#include <linux/grsecurity.h>
59558+#include <linux/grinternal.h>
59559+
59560+extern const char *captab_log[];
59561+extern int captab_log_entries;
59562+
59563+int
59564+gr_acl_is_capable(const int cap)
59565+{
59566+ struct task_struct *task = current;
59567+ const struct cred *cred = current_cred();
59568+ struct acl_subject_label *curracl;
59569+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
59570+ kernel_cap_t cap_audit = __cap_empty_set;
59571+
59572+ if (!gr_acl_is_enabled())
59573+ return 1;
59574+
59575+ curracl = task->acl;
59576+
59577+ cap_drop = curracl->cap_lower;
59578+ cap_mask = curracl->cap_mask;
59579+ cap_audit = curracl->cap_invert_audit;
59580+
59581+ while ((curracl = curracl->parent_subject)) {
59582+ /* if the cap isn't specified in the current computed mask but is specified in the
59583+ current level subject, and is lowered in the current level subject, then add
59584+ it to the set of dropped capabilities
59585+ otherwise, add the current level subject's mask to the current computed mask
59586+ */
59587+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
59588+ cap_raise(cap_mask, cap);
59589+ if (cap_raised(curracl->cap_lower, cap))
59590+ cap_raise(cap_drop, cap);
59591+ if (cap_raised(curracl->cap_invert_audit, cap))
59592+ cap_raise(cap_audit, cap);
59593+ }
59594+ }
59595+
59596+ if (!cap_raised(cap_drop, cap)) {
59597+ if (cap_raised(cap_audit, cap))
59598+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
59599+ return 1;
59600+ }
59601+
59602+ curracl = task->acl;
59603+
59604+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
59605+ && cap_raised(cred->cap_effective, cap)) {
59606+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59607+ task->role->roletype, cred->uid,
59608+ cred->gid, task->exec_file ?
59609+ gr_to_filename(task->exec_file->f_path.dentry,
59610+ task->exec_file->f_path.mnt) : curracl->filename,
59611+ curracl->filename, 0UL,
59612+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
59613+ return 1;
59614+ }
59615+
59616+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
59617+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
59618+ return 0;
59619+}
59620+
59621+int
59622+gr_acl_is_capable_nolog(const int cap)
59623+{
59624+ struct acl_subject_label *curracl;
59625+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
59626+
59627+ if (!gr_acl_is_enabled())
59628+ return 1;
59629+
59630+ curracl = current->acl;
59631+
59632+ cap_drop = curracl->cap_lower;
59633+ cap_mask = curracl->cap_mask;
59634+
59635+ while ((curracl = curracl->parent_subject)) {
59636+ /* if the cap isn't specified in the current computed mask but is specified in the
59637+ current level subject, and is lowered in the current level subject, then add
59638+ it to the set of dropped capabilities
59639+ otherwise, add the current level subject's mask to the current computed mask
59640+ */
59641+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
59642+ cap_raise(cap_mask, cap);
59643+ if (cap_raised(curracl->cap_lower, cap))
59644+ cap_raise(cap_drop, cap);
59645+ }
59646+ }
59647+
59648+ if (!cap_raised(cap_drop, cap))
59649+ return 1;
59650+
59651+ return 0;
59652+}
59653+
59654diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
59655new file mode 100644
59656index 0000000..d5f210c
59657--- /dev/null
59658+++ b/grsecurity/gracl_fs.c
59659@@ -0,0 +1,433 @@
59660+#include <linux/kernel.h>
59661+#include <linux/sched.h>
59662+#include <linux/types.h>
59663+#include <linux/fs.h>
59664+#include <linux/file.h>
59665+#include <linux/stat.h>
59666+#include <linux/grsecurity.h>
59667+#include <linux/grinternal.h>
59668+#include <linux/gracl.h>
59669+
59670+__u32
59671+gr_acl_handle_hidden_file(const struct dentry * dentry,
59672+ const struct vfsmount * mnt)
59673+{
59674+ __u32 mode;
59675+
59676+ if (unlikely(!dentry->d_inode))
59677+ return GR_FIND;
59678+
59679+ mode =
59680+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
59681+
59682+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
59683+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
59684+ return mode;
59685+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
59686+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
59687+ return 0;
59688+ } else if (unlikely(!(mode & GR_FIND)))
59689+ return 0;
59690+
59691+ return GR_FIND;
59692+}
59693+
59694+__u32
59695+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
59696+ int acc_mode)
59697+{
59698+ __u32 reqmode = GR_FIND;
59699+ __u32 mode;
59700+
59701+ if (unlikely(!dentry->d_inode))
59702+ return reqmode;
59703+
59704+ if (acc_mode & MAY_APPEND)
59705+ reqmode |= GR_APPEND;
59706+ else if (acc_mode & MAY_WRITE)
59707+ reqmode |= GR_WRITE;
59708+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
59709+ reqmode |= GR_READ;
59710+
59711+ mode =
59712+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
59713+ mnt);
59714+
59715+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59716+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
59717+ reqmode & GR_READ ? " reading" : "",
59718+ reqmode & GR_WRITE ? " writing" : reqmode &
59719+ GR_APPEND ? " appending" : "");
59720+ return reqmode;
59721+ } else
59722+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59723+ {
59724+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
59725+ reqmode & GR_READ ? " reading" : "",
59726+ reqmode & GR_WRITE ? " writing" : reqmode &
59727+ GR_APPEND ? " appending" : "");
59728+ return 0;
59729+ } else if (unlikely((mode & reqmode) != reqmode))
59730+ return 0;
59731+
59732+ return reqmode;
59733+}
59734+
59735+__u32
59736+gr_acl_handle_creat(const struct dentry * dentry,
59737+ const struct dentry * p_dentry,
59738+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
59739+ const int imode)
59740+{
59741+ __u32 reqmode = GR_WRITE | GR_CREATE;
59742+ __u32 mode;
59743+
59744+ if (acc_mode & MAY_APPEND)
59745+ reqmode |= GR_APPEND;
59746+ // if a directory was required or the directory already exists, then
59747+ // don't count this open as a read
59748+ if ((acc_mode & MAY_READ) &&
59749+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
59750+ reqmode |= GR_READ;
59751+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
59752+ reqmode |= GR_SETID;
59753+
59754+ mode =
59755+ gr_check_create(dentry, p_dentry, p_mnt,
59756+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
59757+
59758+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59759+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
59760+ reqmode & GR_READ ? " reading" : "",
59761+ reqmode & GR_WRITE ? " writing" : reqmode &
59762+ GR_APPEND ? " appending" : "");
59763+ return reqmode;
59764+ } else
59765+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59766+ {
59767+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
59768+ reqmode & GR_READ ? " reading" : "",
59769+ reqmode & GR_WRITE ? " writing" : reqmode &
59770+ GR_APPEND ? " appending" : "");
59771+ return 0;
59772+ } else if (unlikely((mode & reqmode) != reqmode))
59773+ return 0;
59774+
59775+ return reqmode;
59776+}
59777+
59778+__u32
59779+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
59780+ const int fmode)
59781+{
59782+ __u32 mode, reqmode = GR_FIND;
59783+
59784+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
59785+ reqmode |= GR_EXEC;
59786+ if (fmode & S_IWOTH)
59787+ reqmode |= GR_WRITE;
59788+ if (fmode & S_IROTH)
59789+ reqmode |= GR_READ;
59790+
59791+ mode =
59792+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
59793+ mnt);
59794+
59795+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
59796+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
59797+ reqmode & GR_READ ? " reading" : "",
59798+ reqmode & GR_WRITE ? " writing" : "",
59799+ reqmode & GR_EXEC ? " executing" : "");
59800+ return reqmode;
59801+ } else
59802+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
59803+ {
59804+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
59805+ reqmode & GR_READ ? " reading" : "",
59806+ reqmode & GR_WRITE ? " writing" : "",
59807+ reqmode & GR_EXEC ? " executing" : "");
59808+ return 0;
59809+ } else if (unlikely((mode & reqmode) != reqmode))
59810+ return 0;
59811+
59812+ return reqmode;
59813+}
59814+
59815+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
59816+{
59817+ __u32 mode;
59818+
59819+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
59820+
59821+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
59822+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
59823+ return mode;
59824+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
59825+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
59826+ return 0;
59827+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
59828+ return 0;
59829+
59830+ return (reqmode);
59831+}
59832+
59833+__u32
59834+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
59835+{
59836+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
59837+}
59838+
59839+__u32
59840+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
59841+{
59842+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
59843+}
59844+
59845+__u32
59846+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
59847+{
59848+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
59849+}
59850+
59851+__u32
59852+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
59853+{
59854+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
59855+}
59856+
59857+__u32
59858+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
59859+ mode_t mode)
59860+{
59861+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
59862+ return 1;
59863+
59864+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
59865+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
59866+ GR_FCHMOD_ACL_MSG);
59867+ } else {
59868+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
59869+ }
59870+}
59871+
59872+__u32
59873+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
59874+ mode_t mode)
59875+{
59876+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
59877+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
59878+ GR_CHMOD_ACL_MSG);
59879+ } else {
59880+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
59881+ }
59882+}
59883+
59884+__u32
59885+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
59886+{
59887+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
59888+}
59889+
59890+__u32
59891+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
59892+{
59893+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
59894+}
59895+
59896+__u32
59897+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
59898+{
59899+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
59900+}
59901+
59902+__u32
59903+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
59904+{
59905+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
59906+ GR_UNIXCONNECT_ACL_MSG);
59907+}
59908+
59909+/* hardlinks require at minimum create and link permission,
59910+ any additional privilege required is based on the
59911+ privilege of the file being linked to
59912+*/
59913+__u32
59914+gr_acl_handle_link(const struct dentry * new_dentry,
59915+ const struct dentry * parent_dentry,
59916+ const struct vfsmount * parent_mnt,
59917+ const struct dentry * old_dentry,
59918+ const struct vfsmount * old_mnt, const char *to)
59919+{
59920+ __u32 mode;
59921+ __u32 needmode = GR_CREATE | GR_LINK;
59922+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
59923+
59924+ mode =
59925+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
59926+ old_mnt);
59927+
59928+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
59929+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
59930+ return mode;
59931+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
59932+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
59933+ return 0;
59934+ } else if (unlikely((mode & needmode) != needmode))
59935+ return 0;
59936+
59937+ return 1;
59938+}
59939+
59940+__u32
59941+gr_acl_handle_symlink(const struct dentry * new_dentry,
59942+ const struct dentry * parent_dentry,
59943+ const struct vfsmount * parent_mnt, const char *from)
59944+{
59945+ __u32 needmode = GR_WRITE | GR_CREATE;
59946+ __u32 mode;
59947+
59948+ mode =
59949+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
59950+ GR_CREATE | GR_AUDIT_CREATE |
59951+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
59952+
59953+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
59954+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
59955+ return mode;
59956+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
59957+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
59958+ return 0;
59959+ } else if (unlikely((mode & needmode) != needmode))
59960+ return 0;
59961+
59962+ return (GR_WRITE | GR_CREATE);
59963+}
59964+
59965+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
59966+{
59967+ __u32 mode;
59968+
59969+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
59970+
59971+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
59972+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
59973+ return mode;
59974+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
59975+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
59976+ return 0;
59977+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
59978+ return 0;
59979+
59980+ return (reqmode);
59981+}
59982+
59983+__u32
59984+gr_acl_handle_mknod(const struct dentry * new_dentry,
59985+ const struct dentry * parent_dentry,
59986+ const struct vfsmount * parent_mnt,
59987+ const int mode)
59988+{
59989+ __u32 reqmode = GR_WRITE | GR_CREATE;
59990+ if (unlikely(mode & (S_ISUID | S_ISGID)))
59991+ reqmode |= GR_SETID;
59992+
59993+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
59994+ reqmode, GR_MKNOD_ACL_MSG);
59995+}
59996+
59997+__u32
59998+gr_acl_handle_mkdir(const struct dentry *new_dentry,
59999+ const struct dentry *parent_dentry,
60000+ const struct vfsmount *parent_mnt)
60001+{
60002+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60003+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60004+}
60005+
60006+#define RENAME_CHECK_SUCCESS(old, new) \
60007+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60008+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60009+
60010+int
60011+gr_acl_handle_rename(struct dentry *new_dentry,
60012+ struct dentry *parent_dentry,
60013+ const struct vfsmount *parent_mnt,
60014+ struct dentry *old_dentry,
60015+ struct inode *old_parent_inode,
60016+ struct vfsmount *old_mnt, const char *newname)
60017+{
60018+ __u32 comp1, comp2;
60019+ int error = 0;
60020+
60021+ if (unlikely(!gr_acl_is_enabled()))
60022+ return 0;
60023+
60024+ if (!new_dentry->d_inode) {
60025+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60026+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60027+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60028+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60029+ GR_DELETE | GR_AUDIT_DELETE |
60030+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60031+ GR_SUPPRESS, old_mnt);
60032+ } else {
60033+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60034+ GR_CREATE | GR_DELETE |
60035+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60036+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60037+ GR_SUPPRESS, parent_mnt);
60038+ comp2 =
60039+ gr_search_file(old_dentry,
60040+ GR_READ | GR_WRITE | GR_AUDIT_READ |
60041+ GR_DELETE | GR_AUDIT_DELETE |
60042+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60043+ }
60044+
60045+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60046+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60047+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60048+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60049+ && !(comp2 & GR_SUPPRESS)) {
60050+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60051+ error = -EACCES;
60052+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60053+ error = -EACCES;
60054+
60055+ return error;
60056+}
60057+
60058+void
60059+gr_acl_handle_exit(void)
60060+{
60061+ u16 id;
60062+ char *rolename;
60063+ struct file *exec_file;
60064+
60065+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60066+ !(current->role->roletype & GR_ROLE_PERSIST))) {
60067+ id = current->acl_role_id;
60068+ rolename = current->role->rolename;
60069+ gr_set_acls(1);
60070+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60071+ }
60072+
60073+ write_lock(&grsec_exec_file_lock);
60074+ exec_file = current->exec_file;
60075+ current->exec_file = NULL;
60076+ write_unlock(&grsec_exec_file_lock);
60077+
60078+ if (exec_file)
60079+ fput(exec_file);
60080+}
60081+
60082+int
60083+gr_acl_handle_procpidmem(const struct task_struct *task)
60084+{
60085+ if (unlikely(!gr_acl_is_enabled()))
60086+ return 0;
60087+
60088+ if (task != current && task->acl->mode & GR_PROTPROCFD)
60089+ return -EACCES;
60090+
60091+ return 0;
60092+}
60093diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60094new file mode 100644
60095index 0000000..cd07b96
60096--- /dev/null
60097+++ b/grsecurity/gracl_ip.c
60098@@ -0,0 +1,382 @@
60099+#include <linux/kernel.h>
60100+#include <asm/uaccess.h>
60101+#include <asm/errno.h>
60102+#include <net/sock.h>
60103+#include <linux/file.h>
60104+#include <linux/fs.h>
60105+#include <linux/net.h>
60106+#include <linux/in.h>
60107+#include <linux/skbuff.h>
60108+#include <linux/ip.h>
60109+#include <linux/udp.h>
60110+#include <linux/smp_lock.h>
60111+#include <linux/types.h>
60112+#include <linux/sched.h>
60113+#include <linux/netdevice.h>
60114+#include <linux/inetdevice.h>
60115+#include <linux/gracl.h>
60116+#include <linux/grsecurity.h>
60117+#include <linux/grinternal.h>
60118+
60119+#define GR_BIND 0x01
60120+#define GR_CONNECT 0x02
60121+#define GR_INVERT 0x04
60122+#define GR_BINDOVERRIDE 0x08
60123+#define GR_CONNECTOVERRIDE 0x10
60124+#define GR_SOCK_FAMILY 0x20
60125+
60126+static const char * gr_protocols[IPPROTO_MAX] = {
60127+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60128+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60129+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60130+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60131+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60132+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60133+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60134+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60135+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60136+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60137+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60138+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60139+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60140+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60141+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60142+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60143+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60144+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60145+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60146+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60147+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60148+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60149+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60150+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60151+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60152+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60153+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
60154+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
60155+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
60156+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
60157+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
60158+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
60159+ };
60160+
60161+static const char * gr_socktypes[SOCK_MAX] = {
60162+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
60163+ "unknown:7", "unknown:8", "unknown:9", "packet"
60164+ };
60165+
60166+static const char * gr_sockfamilies[AF_MAX+1] = {
60167+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
60168+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
60169+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
60170+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
60171+ };
60172+
60173+const char *
60174+gr_proto_to_name(unsigned char proto)
60175+{
60176+ return gr_protocols[proto];
60177+}
60178+
60179+const char *
60180+gr_socktype_to_name(unsigned char type)
60181+{
60182+ return gr_socktypes[type];
60183+}
60184+
60185+const char *
60186+gr_sockfamily_to_name(unsigned char family)
60187+{
60188+ return gr_sockfamilies[family];
60189+}
60190+
60191+int
60192+gr_search_socket(const int domain, const int type, const int protocol)
60193+{
60194+ struct acl_subject_label *curr;
60195+ const struct cred *cred = current_cred();
60196+
60197+ if (unlikely(!gr_acl_is_enabled()))
60198+ goto exit;
60199+
60200+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
60201+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
60202+ goto exit; // let the kernel handle it
60203+
60204+ curr = current->acl;
60205+
60206+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
60207+ /* the family is allowed, if this is PF_INET allow it only if
60208+ the extra sock type/protocol checks pass */
60209+ if (domain == PF_INET)
60210+ goto inet_check;
60211+ goto exit;
60212+ } else {
60213+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60214+ __u32 fakeip = 0;
60215+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60216+ current->role->roletype, cred->uid,
60217+ cred->gid, current->exec_file ?
60218+ gr_to_filename(current->exec_file->f_path.dentry,
60219+ current->exec_file->f_path.mnt) :
60220+ curr->filename, curr->filename,
60221+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
60222+ &current->signal->saved_ip);
60223+ goto exit;
60224+ }
60225+ goto exit_fail;
60226+ }
60227+
60228+inet_check:
60229+ /* the rest of this checking is for IPv4 only */
60230+ if (!curr->ips)
60231+ goto exit;
60232+
60233+ if ((curr->ip_type & (1 << type)) &&
60234+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
60235+ goto exit;
60236+
60237+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60238+ /* we don't place acls on raw sockets , and sometimes
60239+ dgram/ip sockets are opened for ioctl and not
60240+ bind/connect, so we'll fake a bind learn log */
60241+ if (type == SOCK_RAW || type == SOCK_PACKET) {
60242+ __u32 fakeip = 0;
60243+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60244+ current->role->roletype, cred->uid,
60245+ cred->gid, current->exec_file ?
60246+ gr_to_filename(current->exec_file->f_path.dentry,
60247+ current->exec_file->f_path.mnt) :
60248+ curr->filename, curr->filename,
60249+ &fakeip, 0, type,
60250+ protocol, GR_CONNECT, &current->signal->saved_ip);
60251+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
60252+ __u32 fakeip = 0;
60253+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60254+ current->role->roletype, cred->uid,
60255+ cred->gid, current->exec_file ?
60256+ gr_to_filename(current->exec_file->f_path.dentry,
60257+ current->exec_file->f_path.mnt) :
60258+ curr->filename, curr->filename,
60259+ &fakeip, 0, type,
60260+ protocol, GR_BIND, &current->signal->saved_ip);
60261+ }
60262+ /* we'll log when they use connect or bind */
60263+ goto exit;
60264+ }
60265+
60266+exit_fail:
60267+ if (domain == PF_INET)
60268+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
60269+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
60270+ else
60271+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
60272+ gr_socktype_to_name(type), protocol);
60273+
60274+ return 0;
60275+exit:
60276+ return 1;
60277+}
60278+
60279+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
60280+{
60281+ if ((ip->mode & mode) &&
60282+ (ip_port >= ip->low) &&
60283+ (ip_port <= ip->high) &&
60284+ ((ntohl(ip_addr) & our_netmask) ==
60285+ (ntohl(our_addr) & our_netmask))
60286+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
60287+ && (ip->type & (1 << type))) {
60288+ if (ip->mode & GR_INVERT)
60289+ return 2; // specifically denied
60290+ else
60291+ return 1; // allowed
60292+ }
60293+
60294+ return 0; // not specifically allowed, may continue parsing
60295+}
60296+
60297+static int
60298+gr_search_connectbind(const int full_mode, struct sock *sk,
60299+ struct sockaddr_in *addr, const int type)
60300+{
60301+ char iface[IFNAMSIZ] = {0};
60302+ struct acl_subject_label *curr;
60303+ struct acl_ip_label *ip;
60304+ struct inet_sock *isk;
60305+ struct net_device *dev;
60306+ struct in_device *idev;
60307+ unsigned long i;
60308+ int ret;
60309+ int mode = full_mode & (GR_BIND | GR_CONNECT);
60310+ __u32 ip_addr = 0;
60311+ __u32 our_addr;
60312+ __u32 our_netmask;
60313+ char *p;
60314+ __u16 ip_port = 0;
60315+ const struct cred *cred = current_cred();
60316+
60317+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
60318+ return 0;
60319+
60320+ curr = current->acl;
60321+ isk = inet_sk(sk);
60322+
60323+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
60324+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
60325+ addr->sin_addr.s_addr = curr->inaddr_any_override;
60326+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
60327+ struct sockaddr_in saddr;
60328+ int err;
60329+
60330+ saddr.sin_family = AF_INET;
60331+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
60332+ saddr.sin_port = isk->sport;
60333+
60334+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60335+ if (err)
60336+ return err;
60337+
60338+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60339+ if (err)
60340+ return err;
60341+ }
60342+
60343+ if (!curr->ips)
60344+ return 0;
60345+
60346+ ip_addr = addr->sin_addr.s_addr;
60347+ ip_port = ntohs(addr->sin_port);
60348+
60349+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60350+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60351+ current->role->roletype, cred->uid,
60352+ cred->gid, current->exec_file ?
60353+ gr_to_filename(current->exec_file->f_path.dentry,
60354+ current->exec_file->f_path.mnt) :
60355+ curr->filename, curr->filename,
60356+ &ip_addr, ip_port, type,
60357+ sk->sk_protocol, mode, &current->signal->saved_ip);
60358+ return 0;
60359+ }
60360+
60361+ for (i = 0; i < curr->ip_num; i++) {
60362+ ip = *(curr->ips + i);
60363+ if (ip->iface != NULL) {
60364+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
60365+ p = strchr(iface, ':');
60366+ if (p != NULL)
60367+ *p = '\0';
60368+ dev = dev_get_by_name(sock_net(sk), iface);
60369+ if (dev == NULL)
60370+ continue;
60371+ idev = in_dev_get(dev);
60372+ if (idev == NULL) {
60373+ dev_put(dev);
60374+ continue;
60375+ }
60376+ rcu_read_lock();
60377+ for_ifa(idev) {
60378+ if (!strcmp(ip->iface, ifa->ifa_label)) {
60379+ our_addr = ifa->ifa_address;
60380+ our_netmask = 0xffffffff;
60381+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60382+ if (ret == 1) {
60383+ rcu_read_unlock();
60384+ in_dev_put(idev);
60385+ dev_put(dev);
60386+ return 0;
60387+ } else if (ret == 2) {
60388+ rcu_read_unlock();
60389+ in_dev_put(idev);
60390+ dev_put(dev);
60391+ goto denied;
60392+ }
60393+ }
60394+ } endfor_ifa(idev);
60395+ rcu_read_unlock();
60396+ in_dev_put(idev);
60397+ dev_put(dev);
60398+ } else {
60399+ our_addr = ip->addr;
60400+ our_netmask = ip->netmask;
60401+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60402+ if (ret == 1)
60403+ return 0;
60404+ else if (ret == 2)
60405+ goto denied;
60406+ }
60407+ }
60408+
60409+denied:
60410+ if (mode == GR_BIND)
60411+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60412+ else if (mode == GR_CONNECT)
60413+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60414+
60415+ return -EACCES;
60416+}
60417+
60418+int
60419+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
60420+{
60421+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
60422+}
60423+
60424+int
60425+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
60426+{
60427+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
60428+}
60429+
60430+int gr_search_listen(struct socket *sock)
60431+{
60432+ struct sock *sk = sock->sk;
60433+ struct sockaddr_in addr;
60434+
60435+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
60436+ addr.sin_port = inet_sk(sk)->sport;
60437+
60438+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
60439+}
60440+
60441+int gr_search_accept(struct socket *sock)
60442+{
60443+ struct sock *sk = sock->sk;
60444+ struct sockaddr_in addr;
60445+
60446+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
60447+ addr.sin_port = inet_sk(sk)->sport;
60448+
60449+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
60450+}
60451+
60452+int
60453+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
60454+{
60455+ if (addr)
60456+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
60457+ else {
60458+ struct sockaddr_in sin;
60459+ const struct inet_sock *inet = inet_sk(sk);
60460+
60461+ sin.sin_addr.s_addr = inet->daddr;
60462+ sin.sin_port = inet->dport;
60463+
60464+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
60465+ }
60466+}
60467+
60468+int
60469+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
60470+{
60471+ struct sockaddr_in sin;
60472+
60473+ if (unlikely(skb->len < sizeof (struct udphdr)))
60474+ return 0; // skip this packet
60475+
60476+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
60477+ sin.sin_port = udp_hdr(skb)->source;
60478+
60479+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
60480+}
60481diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
60482new file mode 100644
60483index 0000000..34bdd46
60484--- /dev/null
60485+++ b/grsecurity/gracl_learn.c
60486@@ -0,0 +1,208 @@
60487+#include <linux/kernel.h>
60488+#include <linux/mm.h>
60489+#include <linux/sched.h>
60490+#include <linux/poll.h>
60491+#include <linux/smp_lock.h>
60492+#include <linux/string.h>
60493+#include <linux/file.h>
60494+#include <linux/types.h>
60495+#include <linux/vmalloc.h>
60496+#include <linux/grinternal.h>
60497+
60498+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
60499+ size_t count, loff_t *ppos);
60500+extern int gr_acl_is_enabled(void);
60501+
60502+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
60503+static int gr_learn_attached;
60504+
60505+/* use a 512k buffer */
60506+#define LEARN_BUFFER_SIZE (512 * 1024)
60507+
60508+static DEFINE_SPINLOCK(gr_learn_lock);
60509+static DEFINE_MUTEX(gr_learn_user_mutex);
60510+
60511+/* we need to maintain two buffers, so that the kernel context of grlearn
60512+ uses a semaphore around the userspace copying, and the other kernel contexts
60513+ use a spinlock when copying into the buffer, since they cannot sleep
60514+*/
60515+static char *learn_buffer;
60516+static char *learn_buffer_user;
60517+static int learn_buffer_len;
60518+static int learn_buffer_user_len;
60519+
60520+static ssize_t
60521+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
60522+{
60523+ DECLARE_WAITQUEUE(wait, current);
60524+ ssize_t retval = 0;
60525+
60526+ add_wait_queue(&learn_wait, &wait);
60527+ set_current_state(TASK_INTERRUPTIBLE);
60528+ do {
60529+ mutex_lock(&gr_learn_user_mutex);
60530+ spin_lock(&gr_learn_lock);
60531+ if (learn_buffer_len)
60532+ break;
60533+ spin_unlock(&gr_learn_lock);
60534+ mutex_unlock(&gr_learn_user_mutex);
60535+ if (file->f_flags & O_NONBLOCK) {
60536+ retval = -EAGAIN;
60537+ goto out;
60538+ }
60539+ if (signal_pending(current)) {
60540+ retval = -ERESTARTSYS;
60541+ goto out;
60542+ }
60543+
60544+ schedule();
60545+ } while (1);
60546+
60547+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
60548+ learn_buffer_user_len = learn_buffer_len;
60549+ retval = learn_buffer_len;
60550+ learn_buffer_len = 0;
60551+
60552+ spin_unlock(&gr_learn_lock);
60553+
60554+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
60555+ retval = -EFAULT;
60556+
60557+ mutex_unlock(&gr_learn_user_mutex);
60558+out:
60559+ set_current_state(TASK_RUNNING);
60560+ remove_wait_queue(&learn_wait, &wait);
60561+ return retval;
60562+}
60563+
60564+static unsigned int
60565+poll_learn(struct file * file, poll_table * wait)
60566+{
60567+ poll_wait(file, &learn_wait, wait);
60568+
60569+ if (learn_buffer_len)
60570+ return (POLLIN | POLLRDNORM);
60571+
60572+ return 0;
60573+}
60574+
60575+void
60576+gr_clear_learn_entries(void)
60577+{
60578+ char *tmp;
60579+
60580+ mutex_lock(&gr_learn_user_mutex);
60581+ spin_lock(&gr_learn_lock);
60582+ tmp = learn_buffer;
60583+ learn_buffer = NULL;
60584+ spin_unlock(&gr_learn_lock);
60585+ if (tmp)
60586+ vfree(tmp);
60587+ if (learn_buffer_user != NULL) {
60588+ vfree(learn_buffer_user);
60589+ learn_buffer_user = NULL;
60590+ }
60591+ learn_buffer_len = 0;
60592+ mutex_unlock(&gr_learn_user_mutex);
60593+
60594+ return;
60595+}
60596+
60597+void
60598+gr_add_learn_entry(const char *fmt, ...)
60599+{
60600+ va_list args;
60601+ unsigned int len;
60602+
60603+ if (!gr_learn_attached)
60604+ return;
60605+
60606+ spin_lock(&gr_learn_lock);
60607+
60608+ /* leave a gap at the end so we know when it's "full" but don't have to
60609+ compute the exact length of the string we're trying to append
60610+ */
60611+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
60612+ spin_unlock(&gr_learn_lock);
60613+ wake_up_interruptible(&learn_wait);
60614+ return;
60615+ }
60616+ if (learn_buffer == NULL) {
60617+ spin_unlock(&gr_learn_lock);
60618+ return;
60619+ }
60620+
60621+ va_start(args, fmt);
60622+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
60623+ va_end(args);
60624+
60625+ learn_buffer_len += len + 1;
60626+
60627+ spin_unlock(&gr_learn_lock);
60628+ wake_up_interruptible(&learn_wait);
60629+
60630+ return;
60631+}
60632+
60633+static int
60634+open_learn(struct inode *inode, struct file *file)
60635+{
60636+ if (file->f_mode & FMODE_READ && gr_learn_attached)
60637+ return -EBUSY;
60638+ if (file->f_mode & FMODE_READ) {
60639+ int retval = 0;
60640+ mutex_lock(&gr_learn_user_mutex);
60641+ if (learn_buffer == NULL)
60642+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
60643+ if (learn_buffer_user == NULL)
60644+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
60645+ if (learn_buffer == NULL) {
60646+ retval = -ENOMEM;
60647+ goto out_error;
60648+ }
60649+ if (learn_buffer_user == NULL) {
60650+ retval = -ENOMEM;
60651+ goto out_error;
60652+ }
60653+ learn_buffer_len = 0;
60654+ learn_buffer_user_len = 0;
60655+ gr_learn_attached = 1;
60656+out_error:
60657+ mutex_unlock(&gr_learn_user_mutex);
60658+ return retval;
60659+ }
60660+ return 0;
60661+}
60662+
60663+static int
60664+close_learn(struct inode *inode, struct file *file)
60665+{
60666+ if (file->f_mode & FMODE_READ) {
60667+ char *tmp = NULL;
60668+ mutex_lock(&gr_learn_user_mutex);
60669+ spin_lock(&gr_learn_lock);
60670+ tmp = learn_buffer;
60671+ learn_buffer = NULL;
60672+ spin_unlock(&gr_learn_lock);
60673+ if (tmp)
60674+ vfree(tmp);
60675+ if (learn_buffer_user != NULL) {
60676+ vfree(learn_buffer_user);
60677+ learn_buffer_user = NULL;
60678+ }
60679+ learn_buffer_len = 0;
60680+ learn_buffer_user_len = 0;
60681+ gr_learn_attached = 0;
60682+ mutex_unlock(&gr_learn_user_mutex);
60683+ }
60684+
60685+ return 0;
60686+}
60687+
60688+const struct file_operations grsec_fops = {
60689+ .read = read_learn,
60690+ .write = write_grsec_handler,
60691+ .open = open_learn,
60692+ .release = close_learn,
60693+ .poll = poll_learn,
60694+};
60695diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
60696new file mode 100644
60697index 0000000..70b2179
60698--- /dev/null
60699+++ b/grsecurity/gracl_res.c
60700@@ -0,0 +1,67 @@
60701+#include <linux/kernel.h>
60702+#include <linux/sched.h>
60703+#include <linux/gracl.h>
60704+#include <linux/grinternal.h>
60705+
60706+static const char *restab_log[] = {
60707+ [RLIMIT_CPU] = "RLIMIT_CPU",
60708+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
60709+ [RLIMIT_DATA] = "RLIMIT_DATA",
60710+ [RLIMIT_STACK] = "RLIMIT_STACK",
60711+ [RLIMIT_CORE] = "RLIMIT_CORE",
60712+ [RLIMIT_RSS] = "RLIMIT_RSS",
60713+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
60714+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
60715+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
60716+ [RLIMIT_AS] = "RLIMIT_AS",
60717+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
60718+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
60719+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
60720+ [RLIMIT_NICE] = "RLIMIT_NICE",
60721+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
60722+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
60723+ [GR_CRASH_RES] = "RLIMIT_CRASH"
60724+};
60725+
60726+void
60727+gr_log_resource(const struct task_struct *task,
60728+ const int res, const unsigned long wanted, const int gt)
60729+{
60730+ const struct cred *cred;
60731+ unsigned long rlim;
60732+
60733+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
60734+ return;
60735+
60736+ // not yet supported resource
60737+ if (unlikely(!restab_log[res]))
60738+ return;
60739+
60740+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
60741+ rlim = task->signal->rlim[res].rlim_max;
60742+ else
60743+ rlim = task->signal->rlim[res].rlim_cur;
60744+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
60745+ return;
60746+
60747+ rcu_read_lock();
60748+ cred = __task_cred(task);
60749+
60750+ if (res == RLIMIT_NPROC &&
60751+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
60752+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
60753+ goto out_rcu_unlock;
60754+ else if (res == RLIMIT_MEMLOCK &&
60755+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
60756+ goto out_rcu_unlock;
60757+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
60758+ goto out_rcu_unlock;
60759+ rcu_read_unlock();
60760+
60761+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
60762+
60763+ return;
60764+out_rcu_unlock:
60765+ rcu_read_unlock();
60766+ return;
60767+}
60768diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
60769new file mode 100644
60770index 0000000..1d1b734
60771--- /dev/null
60772+++ b/grsecurity/gracl_segv.c
60773@@ -0,0 +1,284 @@
60774+#include <linux/kernel.h>
60775+#include <linux/mm.h>
60776+#include <asm/uaccess.h>
60777+#include <asm/errno.h>
60778+#include <asm/mman.h>
60779+#include <net/sock.h>
60780+#include <linux/file.h>
60781+#include <linux/fs.h>
60782+#include <linux/net.h>
60783+#include <linux/in.h>
60784+#include <linux/smp_lock.h>
60785+#include <linux/slab.h>
60786+#include <linux/types.h>
60787+#include <linux/sched.h>
60788+#include <linux/timer.h>
60789+#include <linux/gracl.h>
60790+#include <linux/grsecurity.h>
60791+#include <linux/grinternal.h>
60792+
60793+static struct crash_uid *uid_set;
60794+static unsigned short uid_used;
60795+static DEFINE_SPINLOCK(gr_uid_lock);
60796+extern rwlock_t gr_inode_lock;
60797+extern struct acl_subject_label *
60798+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
60799+ struct acl_role_label *role);
60800+extern int gr_fake_force_sig(int sig, struct task_struct *t);
60801+
60802+int
60803+gr_init_uidset(void)
60804+{
60805+ uid_set =
60806+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
60807+ uid_used = 0;
60808+
60809+ return uid_set ? 1 : 0;
60810+}
60811+
60812+void
60813+gr_free_uidset(void)
60814+{
60815+ if (uid_set)
60816+ kfree(uid_set);
60817+
60818+ return;
60819+}
60820+
60821+int
60822+gr_find_uid(const uid_t uid)
60823+{
60824+ struct crash_uid *tmp = uid_set;
60825+ uid_t buid;
60826+ int low = 0, high = uid_used - 1, mid;
60827+
60828+ while (high >= low) {
60829+ mid = (low + high) >> 1;
60830+ buid = tmp[mid].uid;
60831+ if (buid == uid)
60832+ return mid;
60833+ if (buid > uid)
60834+ high = mid - 1;
60835+ if (buid < uid)
60836+ low = mid + 1;
60837+ }
60838+
60839+ return -1;
60840+}
60841+
60842+static __inline__ void
60843+gr_insertsort(void)
60844+{
60845+ unsigned short i, j;
60846+ struct crash_uid index;
60847+
60848+ for (i = 1; i < uid_used; i++) {
60849+ index = uid_set[i];
60850+ j = i;
60851+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
60852+ uid_set[j] = uid_set[j - 1];
60853+ j--;
60854+ }
60855+ uid_set[j] = index;
60856+ }
60857+
60858+ return;
60859+}
60860+
60861+static __inline__ void
60862+gr_insert_uid(const uid_t uid, const unsigned long expires)
60863+{
60864+ int loc;
60865+
60866+ if (uid_used == GR_UIDTABLE_MAX)
60867+ return;
60868+
60869+ loc = gr_find_uid(uid);
60870+
60871+ if (loc >= 0) {
60872+ uid_set[loc].expires = expires;
60873+ return;
60874+ }
60875+
60876+ uid_set[uid_used].uid = uid;
60877+ uid_set[uid_used].expires = expires;
60878+ uid_used++;
60879+
60880+ gr_insertsort();
60881+
60882+ return;
60883+}
60884+
60885+void
60886+gr_remove_uid(const unsigned short loc)
60887+{
60888+ unsigned short i;
60889+
60890+ for (i = loc + 1; i < uid_used; i++)
60891+ uid_set[i - 1] = uid_set[i];
60892+
60893+ uid_used--;
60894+
60895+ return;
60896+}
60897+
60898+int
60899+gr_check_crash_uid(const uid_t uid)
60900+{
60901+ int loc;
60902+ int ret = 0;
60903+
60904+ if (unlikely(!gr_acl_is_enabled()))
60905+ return 0;
60906+
60907+ spin_lock(&gr_uid_lock);
60908+ loc = gr_find_uid(uid);
60909+
60910+ if (loc < 0)
60911+ goto out_unlock;
60912+
60913+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
60914+ gr_remove_uid(loc);
60915+ else
60916+ ret = 1;
60917+
60918+out_unlock:
60919+ spin_unlock(&gr_uid_lock);
60920+ return ret;
60921+}
60922+
60923+static __inline__ int
60924+proc_is_setxid(const struct cred *cred)
60925+{
60926+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
60927+ cred->uid != cred->fsuid)
60928+ return 1;
60929+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
60930+ cred->gid != cred->fsgid)
60931+ return 1;
60932+
60933+ return 0;
60934+}
60935+
60936+void
60937+gr_handle_crash(struct task_struct *task, const int sig)
60938+{
60939+ struct acl_subject_label *curr;
60940+ struct task_struct *tsk, *tsk2;
60941+ const struct cred *cred;
60942+ const struct cred *cred2;
60943+
60944+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
60945+ return;
60946+
60947+ if (unlikely(!gr_acl_is_enabled()))
60948+ return;
60949+
60950+ curr = task->acl;
60951+
60952+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
60953+ return;
60954+
60955+ if (time_before_eq(curr->expires, get_seconds())) {
60956+ curr->expires = 0;
60957+ curr->crashes = 0;
60958+ }
60959+
60960+ curr->crashes++;
60961+
60962+ if (!curr->expires)
60963+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
60964+
60965+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
60966+ time_after(curr->expires, get_seconds())) {
60967+ rcu_read_lock();
60968+ cred = __task_cred(task);
60969+ if (cred->uid && proc_is_setxid(cred)) {
60970+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
60971+ spin_lock(&gr_uid_lock);
60972+ gr_insert_uid(cred->uid, curr->expires);
60973+ spin_unlock(&gr_uid_lock);
60974+ curr->expires = 0;
60975+ curr->crashes = 0;
60976+ read_lock(&tasklist_lock);
60977+ do_each_thread(tsk2, tsk) {
60978+ cred2 = __task_cred(tsk);
60979+ if (tsk != task && cred2->uid == cred->uid)
60980+ gr_fake_force_sig(SIGKILL, tsk);
60981+ } while_each_thread(tsk2, tsk);
60982+ read_unlock(&tasklist_lock);
60983+ } else {
60984+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
60985+ read_lock(&tasklist_lock);
60986+ read_lock(&grsec_exec_file_lock);
60987+ do_each_thread(tsk2, tsk) {
60988+ if (likely(tsk != task)) {
60989+ // if this thread has the same subject as the one that triggered
60990+ // RES_CRASH and it's the same binary, kill it
60991+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
60992+ gr_fake_force_sig(SIGKILL, tsk);
60993+ }
60994+ } while_each_thread(tsk2, tsk);
60995+ read_unlock(&grsec_exec_file_lock);
60996+ read_unlock(&tasklist_lock);
60997+ }
60998+ rcu_read_unlock();
60999+ }
61000+
61001+ return;
61002+}
61003+
61004+int
61005+gr_check_crash_exec(const struct file *filp)
61006+{
61007+ struct acl_subject_label *curr;
61008+
61009+ if (unlikely(!gr_acl_is_enabled()))
61010+ return 0;
61011+
61012+ read_lock(&gr_inode_lock);
61013+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61014+ filp->f_path.dentry->d_inode->i_sb->s_dev,
61015+ current->role);
61016+ read_unlock(&gr_inode_lock);
61017+
61018+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61019+ (!curr->crashes && !curr->expires))
61020+ return 0;
61021+
61022+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61023+ time_after(curr->expires, get_seconds()))
61024+ return 1;
61025+ else if (time_before_eq(curr->expires, get_seconds())) {
61026+ curr->crashes = 0;
61027+ curr->expires = 0;
61028+ }
61029+
61030+ return 0;
61031+}
61032+
61033+void
61034+gr_handle_alertkill(struct task_struct *task)
61035+{
61036+ struct acl_subject_label *curracl;
61037+ __u32 curr_ip;
61038+ struct task_struct *p, *p2;
61039+
61040+ if (unlikely(!gr_acl_is_enabled()))
61041+ return;
61042+
61043+ curracl = task->acl;
61044+ curr_ip = task->signal->curr_ip;
61045+
61046+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61047+ read_lock(&tasklist_lock);
61048+ do_each_thread(p2, p) {
61049+ if (p->signal->curr_ip == curr_ip)
61050+ gr_fake_force_sig(SIGKILL, p);
61051+ } while_each_thread(p2, p);
61052+ read_unlock(&tasklist_lock);
61053+ } else if (curracl->mode & GR_KILLPROC)
61054+ gr_fake_force_sig(SIGKILL, task);
61055+
61056+ return;
61057+}
61058diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61059new file mode 100644
61060index 0000000..9d83a69
61061--- /dev/null
61062+++ b/grsecurity/gracl_shm.c
61063@@ -0,0 +1,40 @@
61064+#include <linux/kernel.h>
61065+#include <linux/mm.h>
61066+#include <linux/sched.h>
61067+#include <linux/file.h>
61068+#include <linux/ipc.h>
61069+#include <linux/gracl.h>
61070+#include <linux/grsecurity.h>
61071+#include <linux/grinternal.h>
61072+
61073+int
61074+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61075+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61076+{
61077+ struct task_struct *task;
61078+
61079+ if (!gr_acl_is_enabled())
61080+ return 1;
61081+
61082+ rcu_read_lock();
61083+ read_lock(&tasklist_lock);
61084+
61085+ task = find_task_by_vpid(shm_cprid);
61086+
61087+ if (unlikely(!task))
61088+ task = find_task_by_vpid(shm_lapid);
61089+
61090+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61091+ (task->pid == shm_lapid)) &&
61092+ (task->acl->mode & GR_PROTSHM) &&
61093+ (task->acl != current->acl))) {
61094+ read_unlock(&tasklist_lock);
61095+ rcu_read_unlock();
61096+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61097+ return 0;
61098+ }
61099+ read_unlock(&tasklist_lock);
61100+ rcu_read_unlock();
61101+
61102+ return 1;
61103+}
61104diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61105new file mode 100644
61106index 0000000..bc0be01
61107--- /dev/null
61108+++ b/grsecurity/grsec_chdir.c
61109@@ -0,0 +1,19 @@
61110+#include <linux/kernel.h>
61111+#include <linux/sched.h>
61112+#include <linux/fs.h>
61113+#include <linux/file.h>
61114+#include <linux/grsecurity.h>
61115+#include <linux/grinternal.h>
61116+
61117+void
61118+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61119+{
61120+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61121+ if ((grsec_enable_chdir && grsec_enable_group &&
61122+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61123+ !grsec_enable_group)) {
61124+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61125+ }
61126+#endif
61127+ return;
61128+}
61129diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61130new file mode 100644
61131index 0000000..197bdd5
61132--- /dev/null
61133+++ b/grsecurity/grsec_chroot.c
61134@@ -0,0 +1,386 @@
61135+#include <linux/kernel.h>
61136+#include <linux/module.h>
61137+#include <linux/sched.h>
61138+#include <linux/file.h>
61139+#include <linux/fs.h>
61140+#include <linux/mount.h>
61141+#include <linux/types.h>
61142+#include <linux/pid_namespace.h>
61143+#include <linux/grsecurity.h>
61144+#include <linux/grinternal.h>
61145+
61146+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61147+{
61148+#ifdef CONFIG_GRKERNSEC
61149+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61150+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61151+ task->gr_is_chrooted = 1;
61152+ else
61153+ task->gr_is_chrooted = 0;
61154+
61155+ task->gr_chroot_dentry = path->dentry;
61156+#endif
61157+ return;
61158+}
61159+
61160+void gr_clear_chroot_entries(struct task_struct *task)
61161+{
61162+#ifdef CONFIG_GRKERNSEC
61163+ task->gr_is_chrooted = 0;
61164+ task->gr_chroot_dentry = NULL;
61165+#endif
61166+ return;
61167+}
61168+
61169+int
61170+gr_handle_chroot_unix(const pid_t pid)
61171+{
61172+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61173+ struct task_struct *p;
61174+
61175+ if (unlikely(!grsec_enable_chroot_unix))
61176+ return 1;
61177+
61178+ if (likely(!proc_is_chrooted(current)))
61179+ return 1;
61180+
61181+ rcu_read_lock();
61182+ read_lock(&tasklist_lock);
61183+
61184+ p = find_task_by_vpid_unrestricted(pid);
61185+ if (unlikely(p && !have_same_root(current, p))) {
61186+ read_unlock(&tasklist_lock);
61187+ rcu_read_unlock();
61188+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
61189+ return 0;
61190+ }
61191+ read_unlock(&tasklist_lock);
61192+ rcu_read_unlock();
61193+#endif
61194+ return 1;
61195+}
61196+
61197+int
61198+gr_handle_chroot_nice(void)
61199+{
61200+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61201+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
61202+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
61203+ return -EPERM;
61204+ }
61205+#endif
61206+ return 0;
61207+}
61208+
61209+int
61210+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
61211+{
61212+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61213+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
61214+ && proc_is_chrooted(current)) {
61215+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
61216+ return -EACCES;
61217+ }
61218+#endif
61219+ return 0;
61220+}
61221+
61222+int
61223+gr_handle_chroot_rawio(const struct inode *inode)
61224+{
61225+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61226+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
61227+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
61228+ return 1;
61229+#endif
61230+ return 0;
61231+}
61232+
61233+int
61234+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
61235+{
61236+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61237+ struct task_struct *p;
61238+ int ret = 0;
61239+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
61240+ return ret;
61241+
61242+ read_lock(&tasklist_lock);
61243+ do_each_pid_task(pid, type, p) {
61244+ if (!have_same_root(current, p)) {
61245+ ret = 1;
61246+ goto out;
61247+ }
61248+ } while_each_pid_task(pid, type, p);
61249+out:
61250+ read_unlock(&tasklist_lock);
61251+ return ret;
61252+#endif
61253+ return 0;
61254+}
61255+
61256+int
61257+gr_pid_is_chrooted(struct task_struct *p)
61258+{
61259+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61260+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
61261+ return 0;
61262+
61263+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
61264+ !have_same_root(current, p)) {
61265+ return 1;
61266+ }
61267+#endif
61268+ return 0;
61269+}
61270+
61271+EXPORT_SYMBOL(gr_pid_is_chrooted);
61272+
61273+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
61274+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
61275+{
61276+ struct dentry *dentry = (struct dentry *)u_dentry;
61277+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
61278+ struct dentry *realroot;
61279+ struct vfsmount *realrootmnt;
61280+ struct dentry *currentroot;
61281+ struct vfsmount *currentmnt;
61282+ struct task_struct *reaper = &init_task;
61283+ int ret = 1;
61284+
61285+ read_lock(&reaper->fs->lock);
61286+ realrootmnt = mntget(reaper->fs->root.mnt);
61287+ realroot = dget(reaper->fs->root.dentry);
61288+ read_unlock(&reaper->fs->lock);
61289+
61290+ read_lock(&current->fs->lock);
61291+ currentmnt = mntget(current->fs->root.mnt);
61292+ currentroot = dget(current->fs->root.dentry);
61293+ read_unlock(&current->fs->lock);
61294+
61295+ spin_lock(&dcache_lock);
61296+ for (;;) {
61297+ if (unlikely((dentry == realroot && mnt == realrootmnt)
61298+ || (dentry == currentroot && mnt == currentmnt)))
61299+ break;
61300+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
61301+ if (mnt->mnt_parent == mnt)
61302+ break;
61303+ dentry = mnt->mnt_mountpoint;
61304+ mnt = mnt->mnt_parent;
61305+ continue;
61306+ }
61307+ dentry = dentry->d_parent;
61308+ }
61309+ spin_unlock(&dcache_lock);
61310+
61311+ dput(currentroot);
61312+ mntput(currentmnt);
61313+
61314+ /* access is outside of chroot */
61315+ if (dentry == realroot && mnt == realrootmnt)
61316+ ret = 0;
61317+
61318+ dput(realroot);
61319+ mntput(realrootmnt);
61320+ return ret;
61321+}
61322+#endif
61323+
61324+int
61325+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
61326+{
61327+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61328+ if (!grsec_enable_chroot_fchdir)
61329+ return 1;
61330+
61331+ if (!proc_is_chrooted(current))
61332+ return 1;
61333+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
61334+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
61335+ return 0;
61336+ }
61337+#endif
61338+ return 1;
61339+}
61340+
61341+int
61342+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61343+ const time_t shm_createtime)
61344+{
61345+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61346+ struct task_struct *p;
61347+ time_t starttime;
61348+
61349+ if (unlikely(!grsec_enable_chroot_shmat))
61350+ return 1;
61351+
61352+ if (likely(!proc_is_chrooted(current)))
61353+ return 1;
61354+
61355+ rcu_read_lock();
61356+ read_lock(&tasklist_lock);
61357+
61358+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
61359+ starttime = p->start_time.tv_sec;
61360+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
61361+ if (have_same_root(current, p)) {
61362+ goto allow;
61363+ } else {
61364+ read_unlock(&tasklist_lock);
61365+ rcu_read_unlock();
61366+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61367+ return 0;
61368+ }
61369+ }
61370+ /* creator exited, pid reuse, fall through to next check */
61371+ }
61372+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
61373+ if (unlikely(!have_same_root(current, p))) {
61374+ read_unlock(&tasklist_lock);
61375+ rcu_read_unlock();
61376+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61377+ return 0;
61378+ }
61379+ }
61380+
61381+allow:
61382+ read_unlock(&tasklist_lock);
61383+ rcu_read_unlock();
61384+#endif
61385+ return 1;
61386+}
61387+
61388+void
61389+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
61390+{
61391+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61392+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
61393+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
61394+#endif
61395+ return;
61396+}
61397+
61398+int
61399+gr_handle_chroot_mknod(const struct dentry *dentry,
61400+ const struct vfsmount *mnt, const int mode)
61401+{
61402+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61403+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
61404+ proc_is_chrooted(current)) {
61405+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
61406+ return -EPERM;
61407+ }
61408+#endif
61409+ return 0;
61410+}
61411+
61412+int
61413+gr_handle_chroot_mount(const struct dentry *dentry,
61414+ const struct vfsmount *mnt, const char *dev_name)
61415+{
61416+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61417+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
61418+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
61419+ return -EPERM;
61420+ }
61421+#endif
61422+ return 0;
61423+}
61424+
61425+int
61426+gr_handle_chroot_pivot(void)
61427+{
61428+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61429+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
61430+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
61431+ return -EPERM;
61432+ }
61433+#endif
61434+ return 0;
61435+}
61436+
61437+int
61438+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
61439+{
61440+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61441+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
61442+ !gr_is_outside_chroot(dentry, mnt)) {
61443+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
61444+ return -EPERM;
61445+ }
61446+#endif
61447+ return 0;
61448+}
61449+
61450+extern const char *captab_log[];
61451+extern int captab_log_entries;
61452+
61453+int
61454+gr_chroot_is_capable(const int cap)
61455+{
61456+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61457+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
61458+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
61459+ if (cap_raised(chroot_caps, cap)) {
61460+ const struct cred *creds = current_cred();
61461+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
61462+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
61463+ }
61464+ return 0;
61465+ }
61466+ }
61467+#endif
61468+ return 1;
61469+}
61470+
61471+int
61472+gr_chroot_is_capable_nolog(const int cap)
61473+{
61474+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61475+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
61476+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
61477+ if (cap_raised(chroot_caps, cap)) {
61478+ return 0;
61479+ }
61480+ }
61481+#endif
61482+ return 1;
61483+}
61484+
61485+int
61486+gr_handle_chroot_sysctl(const int op)
61487+{
61488+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61489+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
61490+ && (op & MAY_WRITE))
61491+ return -EACCES;
61492+#endif
61493+ return 0;
61494+}
61495+
61496+void
61497+gr_handle_chroot_chdir(struct path *path)
61498+{
61499+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61500+ if (grsec_enable_chroot_chdir)
61501+ set_fs_pwd(current->fs, path);
61502+#endif
61503+ return;
61504+}
61505+
61506+int
61507+gr_handle_chroot_chmod(const struct dentry *dentry,
61508+ const struct vfsmount *mnt, const int mode)
61509+{
61510+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61511+ /* allow chmod +s on directories, but not on files */
61512+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
61513+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
61514+ proc_is_chrooted(current)) {
61515+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
61516+ return -EPERM;
61517+ }
61518+#endif
61519+ return 0;
61520+}
61521diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
61522new file mode 100644
61523index 0000000..b81db5b
61524--- /dev/null
61525+++ b/grsecurity/grsec_disabled.c
61526@@ -0,0 +1,439 @@
61527+#include <linux/kernel.h>
61528+#include <linux/module.h>
61529+#include <linux/sched.h>
61530+#include <linux/file.h>
61531+#include <linux/fs.h>
61532+#include <linux/kdev_t.h>
61533+#include <linux/net.h>
61534+#include <linux/in.h>
61535+#include <linux/ip.h>
61536+#include <linux/skbuff.h>
61537+#include <linux/sysctl.h>
61538+
61539+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61540+void
61541+pax_set_initial_flags(struct linux_binprm *bprm)
61542+{
61543+ return;
61544+}
61545+#endif
61546+
61547+#ifdef CONFIG_SYSCTL
61548+__u32
61549+gr_handle_sysctl(const struct ctl_table * table, const int op)
61550+{
61551+ return 0;
61552+}
61553+#endif
61554+
61555+#ifdef CONFIG_TASKSTATS
61556+int gr_is_taskstats_denied(int pid)
61557+{
61558+ return 0;
61559+}
61560+#endif
61561+
61562+int
61563+gr_acl_is_enabled(void)
61564+{
61565+ return 0;
61566+}
61567+
61568+void
61569+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
61570+{
61571+ return;
61572+}
61573+
61574+int
61575+gr_handle_rawio(const struct inode *inode)
61576+{
61577+ return 0;
61578+}
61579+
61580+void
61581+gr_acl_handle_psacct(struct task_struct *task, const long code)
61582+{
61583+ return;
61584+}
61585+
61586+int
61587+gr_handle_ptrace(struct task_struct *task, const long request)
61588+{
61589+ return 0;
61590+}
61591+
61592+int
61593+gr_handle_proc_ptrace(struct task_struct *task)
61594+{
61595+ return 0;
61596+}
61597+
61598+void
61599+gr_learn_resource(const struct task_struct *task,
61600+ const int res, const unsigned long wanted, const int gt)
61601+{
61602+ return;
61603+}
61604+
61605+int
61606+gr_set_acls(const int type)
61607+{
61608+ return 0;
61609+}
61610+
61611+int
61612+gr_check_hidden_task(const struct task_struct *tsk)
61613+{
61614+ return 0;
61615+}
61616+
61617+int
61618+gr_check_protected_task(const struct task_struct *task)
61619+{
61620+ return 0;
61621+}
61622+
61623+int
61624+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
61625+{
61626+ return 0;
61627+}
61628+
61629+void
61630+gr_copy_label(struct task_struct *tsk)
61631+{
61632+ return;
61633+}
61634+
61635+void
61636+gr_set_pax_flags(struct task_struct *task)
61637+{
61638+ return;
61639+}
61640+
61641+int
61642+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
61643+ const int unsafe_share)
61644+{
61645+ return 0;
61646+}
61647+
61648+void
61649+gr_handle_delete(const ino_t ino, const dev_t dev)
61650+{
61651+ return;
61652+}
61653+
61654+void
61655+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
61656+{
61657+ return;
61658+}
61659+
61660+void
61661+gr_handle_crash(struct task_struct *task, const int sig)
61662+{
61663+ return;
61664+}
61665+
61666+int
61667+gr_check_crash_exec(const struct file *filp)
61668+{
61669+ return 0;
61670+}
61671+
61672+int
61673+gr_check_crash_uid(const uid_t uid)
61674+{
61675+ return 0;
61676+}
61677+
61678+void
61679+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61680+ struct dentry *old_dentry,
61681+ struct dentry *new_dentry,
61682+ struct vfsmount *mnt, const __u8 replace)
61683+{
61684+ return;
61685+}
61686+
61687+int
61688+gr_search_socket(const int family, const int type, const int protocol)
61689+{
61690+ return 1;
61691+}
61692+
61693+int
61694+gr_search_connectbind(const int mode, const struct socket *sock,
61695+ const struct sockaddr_in *addr)
61696+{
61697+ return 0;
61698+}
61699+
61700+void
61701+gr_handle_alertkill(struct task_struct *task)
61702+{
61703+ return;
61704+}
61705+
61706+__u32
61707+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
61708+{
61709+ return 1;
61710+}
61711+
61712+__u32
61713+gr_acl_handle_hidden_file(const struct dentry * dentry,
61714+ const struct vfsmount * mnt)
61715+{
61716+ return 1;
61717+}
61718+
61719+__u32
61720+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61721+ int acc_mode)
61722+{
61723+ return 1;
61724+}
61725+
61726+__u32
61727+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61728+{
61729+ return 1;
61730+}
61731+
61732+__u32
61733+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
61734+{
61735+ return 1;
61736+}
61737+
61738+int
61739+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
61740+ unsigned int *vm_flags)
61741+{
61742+ return 1;
61743+}
61744+
61745+__u32
61746+gr_acl_handle_truncate(const struct dentry * dentry,
61747+ const struct vfsmount * mnt)
61748+{
61749+ return 1;
61750+}
61751+
61752+__u32
61753+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
61754+{
61755+ return 1;
61756+}
61757+
61758+__u32
61759+gr_acl_handle_access(const struct dentry * dentry,
61760+ const struct vfsmount * mnt, const int fmode)
61761+{
61762+ return 1;
61763+}
61764+
61765+__u32
61766+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
61767+ mode_t mode)
61768+{
61769+ return 1;
61770+}
61771+
61772+__u32
61773+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
61774+ mode_t mode)
61775+{
61776+ return 1;
61777+}
61778+
61779+__u32
61780+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
61781+{
61782+ return 1;
61783+}
61784+
61785+__u32
61786+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
61787+{
61788+ return 1;
61789+}
61790+
61791+void
61792+grsecurity_init(void)
61793+{
61794+ return;
61795+}
61796+
61797+__u32
61798+gr_acl_handle_mknod(const struct dentry * new_dentry,
61799+ const struct dentry * parent_dentry,
61800+ const struct vfsmount * parent_mnt,
61801+ const int mode)
61802+{
61803+ return 1;
61804+}
61805+
61806+__u32
61807+gr_acl_handle_mkdir(const struct dentry * new_dentry,
61808+ const struct dentry * parent_dentry,
61809+ const struct vfsmount * parent_mnt)
61810+{
61811+ return 1;
61812+}
61813+
61814+__u32
61815+gr_acl_handle_symlink(const struct dentry * new_dentry,
61816+ const struct dentry * parent_dentry,
61817+ const struct vfsmount * parent_mnt, const char *from)
61818+{
61819+ return 1;
61820+}
61821+
61822+__u32
61823+gr_acl_handle_link(const struct dentry * new_dentry,
61824+ const struct dentry * parent_dentry,
61825+ const struct vfsmount * parent_mnt,
61826+ const struct dentry * old_dentry,
61827+ const struct vfsmount * old_mnt, const char *to)
61828+{
61829+ return 1;
61830+}
61831+
61832+int
61833+gr_acl_handle_rename(const struct dentry *new_dentry,
61834+ const struct dentry *parent_dentry,
61835+ const struct vfsmount *parent_mnt,
61836+ const struct dentry *old_dentry,
61837+ const struct inode *old_parent_inode,
61838+ const struct vfsmount *old_mnt, const char *newname)
61839+{
61840+ return 0;
61841+}
61842+
61843+int
61844+gr_acl_handle_filldir(const struct file *file, const char *name,
61845+ const int namelen, const ino_t ino)
61846+{
61847+ return 1;
61848+}
61849+
61850+int
61851+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61852+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61853+{
61854+ return 1;
61855+}
61856+
61857+int
61858+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
61859+{
61860+ return 0;
61861+}
61862+
61863+int
61864+gr_search_accept(const struct socket *sock)
61865+{
61866+ return 0;
61867+}
61868+
61869+int
61870+gr_search_listen(const struct socket *sock)
61871+{
61872+ return 0;
61873+}
61874+
61875+int
61876+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
61877+{
61878+ return 0;
61879+}
61880+
61881+__u32
61882+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
61883+{
61884+ return 1;
61885+}
61886+
61887+__u32
61888+gr_acl_handle_creat(const struct dentry * dentry,
61889+ const struct dentry * p_dentry,
61890+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61891+ const int imode)
61892+{
61893+ return 1;
61894+}
61895+
61896+void
61897+gr_acl_handle_exit(void)
61898+{
61899+ return;
61900+}
61901+
61902+int
61903+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61904+{
61905+ return 1;
61906+}
61907+
61908+void
61909+gr_set_role_label(const uid_t uid, const gid_t gid)
61910+{
61911+ return;
61912+}
61913+
61914+int
61915+gr_acl_handle_procpidmem(const struct task_struct *task)
61916+{
61917+ return 0;
61918+}
61919+
61920+int
61921+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
61922+{
61923+ return 0;
61924+}
61925+
61926+int
61927+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
61928+{
61929+ return 0;
61930+}
61931+
61932+void
61933+gr_set_kernel_label(struct task_struct *task)
61934+{
61935+ return;
61936+}
61937+
61938+int
61939+gr_check_user_change(int real, int effective, int fs)
61940+{
61941+ return 0;
61942+}
61943+
61944+int
61945+gr_check_group_change(int real, int effective, int fs)
61946+{
61947+ return 0;
61948+}
61949+
61950+int gr_acl_enable_at_secure(void)
61951+{
61952+ return 0;
61953+}
61954+
61955+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
61956+{
61957+ return dentry->d_inode->i_sb->s_dev;
61958+}
61959+
61960+EXPORT_SYMBOL(gr_learn_resource);
61961+EXPORT_SYMBOL(gr_set_kernel_label);
61962+#ifdef CONFIG_SECURITY
61963+EXPORT_SYMBOL(gr_check_user_change);
61964+EXPORT_SYMBOL(gr_check_group_change);
61965+#endif
61966diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
61967new file mode 100644
61968index 0000000..a96e155
61969--- /dev/null
61970+++ b/grsecurity/grsec_exec.c
61971@@ -0,0 +1,204 @@
61972+#include <linux/kernel.h>
61973+#include <linux/sched.h>
61974+#include <linux/file.h>
61975+#include <linux/binfmts.h>
61976+#include <linux/smp_lock.h>
61977+#include <linux/fs.h>
61978+#include <linux/types.h>
61979+#include <linux/grdefs.h>
61980+#include <linux/grinternal.h>
61981+#include <linux/capability.h>
61982+#include <linux/compat.h>
61983+#include <linux/module.h>
61984+
61985+#include <asm/uaccess.h>
61986+
61987+#ifdef CONFIG_GRKERNSEC_EXECLOG
61988+static char gr_exec_arg_buf[132];
61989+static DEFINE_MUTEX(gr_exec_arg_mutex);
61990+#endif
61991+
61992+void
61993+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
61994+{
61995+#ifdef CONFIG_GRKERNSEC_EXECLOG
61996+ char *grarg = gr_exec_arg_buf;
61997+ unsigned int i, x, execlen = 0;
61998+ char c;
61999+
62000+ if (!((grsec_enable_execlog && grsec_enable_group &&
62001+ in_group_p(grsec_audit_gid))
62002+ || (grsec_enable_execlog && !grsec_enable_group)))
62003+ return;
62004+
62005+ mutex_lock(&gr_exec_arg_mutex);
62006+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62007+
62008+ if (unlikely(argv == NULL))
62009+ goto log;
62010+
62011+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62012+ const char __user *p;
62013+ unsigned int len;
62014+
62015+ if (copy_from_user(&p, argv + i, sizeof(p)))
62016+ goto log;
62017+ if (!p)
62018+ goto log;
62019+ len = strnlen_user(p, 128 - execlen);
62020+ if (len > 128 - execlen)
62021+ len = 128 - execlen;
62022+ else if (len > 0)
62023+ len--;
62024+ if (copy_from_user(grarg + execlen, p, len))
62025+ goto log;
62026+
62027+ /* rewrite unprintable characters */
62028+ for (x = 0; x < len; x++) {
62029+ c = *(grarg + execlen + x);
62030+ if (c < 32 || c > 126)
62031+ *(grarg + execlen + x) = ' ';
62032+ }
62033+
62034+ execlen += len;
62035+ *(grarg + execlen) = ' ';
62036+ *(grarg + execlen + 1) = '\0';
62037+ execlen++;
62038+ }
62039+
62040+ log:
62041+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62042+ bprm->file->f_path.mnt, grarg);
62043+ mutex_unlock(&gr_exec_arg_mutex);
62044+#endif
62045+ return;
62046+}
62047+
62048+#ifdef CONFIG_COMPAT
62049+void
62050+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62051+{
62052+#ifdef CONFIG_GRKERNSEC_EXECLOG
62053+ char *grarg = gr_exec_arg_buf;
62054+ unsigned int i, x, execlen = 0;
62055+ char c;
62056+
62057+ if (!((grsec_enable_execlog && grsec_enable_group &&
62058+ in_group_p(grsec_audit_gid))
62059+ || (grsec_enable_execlog && !grsec_enable_group)))
62060+ return;
62061+
62062+ mutex_lock(&gr_exec_arg_mutex);
62063+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62064+
62065+ if (unlikely(argv == NULL))
62066+ goto log;
62067+
62068+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62069+ compat_uptr_t p;
62070+ unsigned int len;
62071+
62072+ if (get_user(p, argv + i))
62073+ goto log;
62074+ len = strnlen_user(compat_ptr(p), 128 - execlen);
62075+ if (len > 128 - execlen)
62076+ len = 128 - execlen;
62077+ else if (len > 0)
62078+ len--;
62079+ else
62080+ goto log;
62081+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62082+ goto log;
62083+
62084+ /* rewrite unprintable characters */
62085+ for (x = 0; x < len; x++) {
62086+ c = *(grarg + execlen + x);
62087+ if (c < 32 || c > 126)
62088+ *(grarg + execlen + x) = ' ';
62089+ }
62090+
62091+ execlen += len;
62092+ *(grarg + execlen) = ' ';
62093+ *(grarg + execlen + 1) = '\0';
62094+ execlen++;
62095+ }
62096+
62097+ log:
62098+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62099+ bprm->file->f_path.mnt, grarg);
62100+ mutex_unlock(&gr_exec_arg_mutex);
62101+#endif
62102+ return;
62103+}
62104+#endif
62105+
62106+#ifdef CONFIG_GRKERNSEC
62107+extern int gr_acl_is_capable(const int cap);
62108+extern int gr_acl_is_capable_nolog(const int cap);
62109+extern int gr_chroot_is_capable(const int cap);
62110+extern int gr_chroot_is_capable_nolog(const int cap);
62111+#endif
62112+
62113+const char *captab_log[] = {
62114+ "CAP_CHOWN",
62115+ "CAP_DAC_OVERRIDE",
62116+ "CAP_DAC_READ_SEARCH",
62117+ "CAP_FOWNER",
62118+ "CAP_FSETID",
62119+ "CAP_KILL",
62120+ "CAP_SETGID",
62121+ "CAP_SETUID",
62122+ "CAP_SETPCAP",
62123+ "CAP_LINUX_IMMUTABLE",
62124+ "CAP_NET_BIND_SERVICE",
62125+ "CAP_NET_BROADCAST",
62126+ "CAP_NET_ADMIN",
62127+ "CAP_NET_RAW",
62128+ "CAP_IPC_LOCK",
62129+ "CAP_IPC_OWNER",
62130+ "CAP_SYS_MODULE",
62131+ "CAP_SYS_RAWIO",
62132+ "CAP_SYS_CHROOT",
62133+ "CAP_SYS_PTRACE",
62134+ "CAP_SYS_PACCT",
62135+ "CAP_SYS_ADMIN",
62136+ "CAP_SYS_BOOT",
62137+ "CAP_SYS_NICE",
62138+ "CAP_SYS_RESOURCE",
62139+ "CAP_SYS_TIME",
62140+ "CAP_SYS_TTY_CONFIG",
62141+ "CAP_MKNOD",
62142+ "CAP_LEASE",
62143+ "CAP_AUDIT_WRITE",
62144+ "CAP_AUDIT_CONTROL",
62145+ "CAP_SETFCAP",
62146+ "CAP_MAC_OVERRIDE",
62147+ "CAP_MAC_ADMIN"
62148+};
62149+
62150+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62151+
62152+int gr_is_capable(const int cap)
62153+{
62154+#ifdef CONFIG_GRKERNSEC
62155+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
62156+ return 1;
62157+ return 0;
62158+#else
62159+ return 1;
62160+#endif
62161+}
62162+
62163+int gr_is_capable_nolog(const int cap)
62164+{
62165+#ifdef CONFIG_GRKERNSEC
62166+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
62167+ return 1;
62168+ return 0;
62169+#else
62170+ return 1;
62171+#endif
62172+}
62173+
62174+EXPORT_SYMBOL(gr_is_capable);
62175+EXPORT_SYMBOL(gr_is_capable_nolog);
62176diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
62177new file mode 100644
62178index 0000000..d3ee748
62179--- /dev/null
62180+++ b/grsecurity/grsec_fifo.c
62181@@ -0,0 +1,24 @@
62182+#include <linux/kernel.h>
62183+#include <linux/sched.h>
62184+#include <linux/fs.h>
62185+#include <linux/file.h>
62186+#include <linux/grinternal.h>
62187+
62188+int
62189+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
62190+ const struct dentry *dir, const int flag, const int acc_mode)
62191+{
62192+#ifdef CONFIG_GRKERNSEC_FIFO
62193+ const struct cred *cred = current_cred();
62194+
62195+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
62196+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
62197+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
62198+ (cred->fsuid != dentry->d_inode->i_uid)) {
62199+ if (!inode_permission(dentry->d_inode, acc_mode))
62200+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
62201+ return -EACCES;
62202+ }
62203+#endif
62204+ return 0;
62205+}
62206diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
62207new file mode 100644
62208index 0000000..8ca18bf
62209--- /dev/null
62210+++ b/grsecurity/grsec_fork.c
62211@@ -0,0 +1,23 @@
62212+#include <linux/kernel.h>
62213+#include <linux/sched.h>
62214+#include <linux/grsecurity.h>
62215+#include <linux/grinternal.h>
62216+#include <linux/errno.h>
62217+
62218+void
62219+gr_log_forkfail(const int retval)
62220+{
62221+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62222+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
62223+ switch (retval) {
62224+ case -EAGAIN:
62225+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
62226+ break;
62227+ case -ENOMEM:
62228+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
62229+ break;
62230+ }
62231+ }
62232+#endif
62233+ return;
62234+}
62235diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
62236new file mode 100644
62237index 0000000..f813c26
62238--- /dev/null
62239+++ b/grsecurity/grsec_init.c
62240@@ -0,0 +1,270 @@
62241+#include <linux/kernel.h>
62242+#include <linux/sched.h>
62243+#include <linux/mm.h>
62244+#include <linux/smp_lock.h>
62245+#include <linux/gracl.h>
62246+#include <linux/slab.h>
62247+#include <linux/vmalloc.h>
62248+#include <linux/percpu.h>
62249+#include <linux/module.h>
62250+
62251+int grsec_enable_brute;
62252+int grsec_enable_link;
62253+int grsec_enable_dmesg;
62254+int grsec_enable_harden_ptrace;
62255+int grsec_enable_fifo;
62256+int grsec_enable_execlog;
62257+int grsec_enable_signal;
62258+int grsec_enable_forkfail;
62259+int grsec_enable_audit_ptrace;
62260+int grsec_enable_time;
62261+int grsec_enable_audit_textrel;
62262+int grsec_enable_group;
62263+int grsec_audit_gid;
62264+int grsec_enable_chdir;
62265+int grsec_enable_mount;
62266+int grsec_enable_rofs;
62267+int grsec_enable_chroot_findtask;
62268+int grsec_enable_chroot_mount;
62269+int grsec_enable_chroot_shmat;
62270+int grsec_enable_chroot_fchdir;
62271+int grsec_enable_chroot_double;
62272+int grsec_enable_chroot_pivot;
62273+int grsec_enable_chroot_chdir;
62274+int grsec_enable_chroot_chmod;
62275+int grsec_enable_chroot_mknod;
62276+int grsec_enable_chroot_nice;
62277+int grsec_enable_chroot_execlog;
62278+int grsec_enable_chroot_caps;
62279+int grsec_enable_chroot_sysctl;
62280+int grsec_enable_chroot_unix;
62281+int grsec_enable_tpe;
62282+int grsec_tpe_gid;
62283+int grsec_enable_blackhole;
62284+#ifdef CONFIG_IPV6_MODULE
62285+EXPORT_SYMBOL(grsec_enable_blackhole);
62286+#endif
62287+int grsec_lastack_retries;
62288+int grsec_enable_tpe_all;
62289+int grsec_enable_tpe_invert;
62290+int grsec_enable_socket_all;
62291+int grsec_socket_all_gid;
62292+int grsec_enable_socket_client;
62293+int grsec_socket_client_gid;
62294+int grsec_enable_socket_server;
62295+int grsec_socket_server_gid;
62296+int grsec_resource_logging;
62297+int grsec_disable_privio;
62298+int grsec_enable_log_rwxmaps;
62299+int grsec_lock;
62300+
62301+DEFINE_SPINLOCK(grsec_alert_lock);
62302+unsigned long grsec_alert_wtime = 0;
62303+unsigned long grsec_alert_fyet = 0;
62304+
62305+DEFINE_SPINLOCK(grsec_audit_lock);
62306+
62307+DEFINE_RWLOCK(grsec_exec_file_lock);
62308+
62309+char *gr_shared_page[4];
62310+
62311+char *gr_alert_log_fmt;
62312+char *gr_audit_log_fmt;
62313+char *gr_alert_log_buf;
62314+char *gr_audit_log_buf;
62315+
62316+extern struct gr_arg *gr_usermode;
62317+extern unsigned char *gr_system_salt;
62318+extern unsigned char *gr_system_sum;
62319+
62320+void __init
62321+grsecurity_init(void)
62322+{
62323+ int j;
62324+ /* create the per-cpu shared pages */
62325+
62326+#ifdef CONFIG_X86
62327+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
62328+#endif
62329+
62330+ for (j = 0; j < 4; j++) {
62331+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
62332+ if (gr_shared_page[j] == NULL) {
62333+ panic("Unable to allocate grsecurity shared page");
62334+ return;
62335+ }
62336+ }
62337+
62338+ /* allocate log buffers */
62339+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
62340+ if (!gr_alert_log_fmt) {
62341+ panic("Unable to allocate grsecurity alert log format buffer");
62342+ return;
62343+ }
62344+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
62345+ if (!gr_audit_log_fmt) {
62346+ panic("Unable to allocate grsecurity audit log format buffer");
62347+ return;
62348+ }
62349+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62350+ if (!gr_alert_log_buf) {
62351+ panic("Unable to allocate grsecurity alert log buffer");
62352+ return;
62353+ }
62354+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62355+ if (!gr_audit_log_buf) {
62356+ panic("Unable to allocate grsecurity audit log buffer");
62357+ return;
62358+ }
62359+
62360+ /* allocate memory for authentication structure */
62361+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
62362+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
62363+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
62364+
62365+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
62366+ panic("Unable to allocate grsecurity authentication structure");
62367+ return;
62368+ }
62369+
62370+
62371+#ifdef CONFIG_GRKERNSEC_IO
62372+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
62373+ grsec_disable_privio = 1;
62374+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62375+ grsec_disable_privio = 1;
62376+#else
62377+ grsec_disable_privio = 0;
62378+#endif
62379+#endif
62380+
62381+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
62382+ /* for backward compatibility, tpe_invert always defaults to on if
62383+ enabled in the kernel
62384+ */
62385+ grsec_enable_tpe_invert = 1;
62386+#endif
62387+
62388+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62389+#ifndef CONFIG_GRKERNSEC_SYSCTL
62390+ grsec_lock = 1;
62391+#endif
62392+
62393+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
62394+ grsec_enable_audit_textrel = 1;
62395+#endif
62396+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
62397+ grsec_enable_log_rwxmaps = 1;
62398+#endif
62399+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
62400+ grsec_enable_group = 1;
62401+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
62402+#endif
62403+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62404+ grsec_enable_chdir = 1;
62405+#endif
62406+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62407+ grsec_enable_harden_ptrace = 1;
62408+#endif
62409+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62410+ grsec_enable_mount = 1;
62411+#endif
62412+#ifdef CONFIG_GRKERNSEC_LINK
62413+ grsec_enable_link = 1;
62414+#endif
62415+#ifdef CONFIG_GRKERNSEC_BRUTE
62416+ grsec_enable_brute = 1;
62417+#endif
62418+#ifdef CONFIG_GRKERNSEC_DMESG
62419+ grsec_enable_dmesg = 1;
62420+#endif
62421+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
62422+ grsec_enable_blackhole = 1;
62423+ grsec_lastack_retries = 4;
62424+#endif
62425+#ifdef CONFIG_GRKERNSEC_FIFO
62426+ grsec_enable_fifo = 1;
62427+#endif
62428+#ifdef CONFIG_GRKERNSEC_EXECLOG
62429+ grsec_enable_execlog = 1;
62430+#endif
62431+#ifdef CONFIG_GRKERNSEC_SIGNAL
62432+ grsec_enable_signal = 1;
62433+#endif
62434+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62435+ grsec_enable_forkfail = 1;
62436+#endif
62437+#ifdef CONFIG_GRKERNSEC_TIME
62438+ grsec_enable_time = 1;
62439+#endif
62440+#ifdef CONFIG_GRKERNSEC_RESLOG
62441+ grsec_resource_logging = 1;
62442+#endif
62443+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62444+ grsec_enable_chroot_findtask = 1;
62445+#endif
62446+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62447+ grsec_enable_chroot_unix = 1;
62448+#endif
62449+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62450+ grsec_enable_chroot_mount = 1;
62451+#endif
62452+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62453+ grsec_enable_chroot_fchdir = 1;
62454+#endif
62455+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62456+ grsec_enable_chroot_shmat = 1;
62457+#endif
62458+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
62459+ grsec_enable_audit_ptrace = 1;
62460+#endif
62461+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62462+ grsec_enable_chroot_double = 1;
62463+#endif
62464+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62465+ grsec_enable_chroot_pivot = 1;
62466+#endif
62467+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62468+ grsec_enable_chroot_chdir = 1;
62469+#endif
62470+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62471+ grsec_enable_chroot_chmod = 1;
62472+#endif
62473+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62474+ grsec_enable_chroot_mknod = 1;
62475+#endif
62476+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62477+ grsec_enable_chroot_nice = 1;
62478+#endif
62479+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62480+ grsec_enable_chroot_execlog = 1;
62481+#endif
62482+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62483+ grsec_enable_chroot_caps = 1;
62484+#endif
62485+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62486+ grsec_enable_chroot_sysctl = 1;
62487+#endif
62488+#ifdef CONFIG_GRKERNSEC_TPE
62489+ grsec_enable_tpe = 1;
62490+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
62491+#ifdef CONFIG_GRKERNSEC_TPE_ALL
62492+ grsec_enable_tpe_all = 1;
62493+#endif
62494+#endif
62495+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
62496+ grsec_enable_socket_all = 1;
62497+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
62498+#endif
62499+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
62500+ grsec_enable_socket_client = 1;
62501+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
62502+#endif
62503+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
62504+ grsec_enable_socket_server = 1;
62505+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
62506+#endif
62507+#endif
62508+
62509+ return;
62510+}
62511diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
62512new file mode 100644
62513index 0000000..3efe141
62514--- /dev/null
62515+++ b/grsecurity/grsec_link.c
62516@@ -0,0 +1,43 @@
62517+#include <linux/kernel.h>
62518+#include <linux/sched.h>
62519+#include <linux/fs.h>
62520+#include <linux/file.h>
62521+#include <linux/grinternal.h>
62522+
62523+int
62524+gr_handle_follow_link(const struct inode *parent,
62525+ const struct inode *inode,
62526+ const struct dentry *dentry, const struct vfsmount *mnt)
62527+{
62528+#ifdef CONFIG_GRKERNSEC_LINK
62529+ const struct cred *cred = current_cred();
62530+
62531+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
62532+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
62533+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
62534+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
62535+ return -EACCES;
62536+ }
62537+#endif
62538+ return 0;
62539+}
62540+
62541+int
62542+gr_handle_hardlink(const struct dentry *dentry,
62543+ const struct vfsmount *mnt,
62544+ struct inode *inode, const int mode, const char *to)
62545+{
62546+#ifdef CONFIG_GRKERNSEC_LINK
62547+ const struct cred *cred = current_cred();
62548+
62549+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
62550+ (!S_ISREG(mode) || (mode & S_ISUID) ||
62551+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
62552+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
62553+ !capable(CAP_FOWNER) && cred->uid) {
62554+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
62555+ return -EPERM;
62556+ }
62557+#endif
62558+ return 0;
62559+}
62560diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
62561new file mode 100644
62562index 0000000..a45d2e9
62563--- /dev/null
62564+++ b/grsecurity/grsec_log.c
62565@@ -0,0 +1,322 @@
62566+#include <linux/kernel.h>
62567+#include <linux/sched.h>
62568+#include <linux/file.h>
62569+#include <linux/tty.h>
62570+#include <linux/fs.h>
62571+#include <linux/grinternal.h>
62572+
62573+#ifdef CONFIG_TREE_PREEMPT_RCU
62574+#define DISABLE_PREEMPT() preempt_disable()
62575+#define ENABLE_PREEMPT() preempt_enable()
62576+#else
62577+#define DISABLE_PREEMPT()
62578+#define ENABLE_PREEMPT()
62579+#endif
62580+
62581+#define BEGIN_LOCKS(x) \
62582+ DISABLE_PREEMPT(); \
62583+ rcu_read_lock(); \
62584+ read_lock(&tasklist_lock); \
62585+ read_lock(&grsec_exec_file_lock); \
62586+ if (x != GR_DO_AUDIT) \
62587+ spin_lock(&grsec_alert_lock); \
62588+ else \
62589+ spin_lock(&grsec_audit_lock)
62590+
62591+#define END_LOCKS(x) \
62592+ if (x != GR_DO_AUDIT) \
62593+ spin_unlock(&grsec_alert_lock); \
62594+ else \
62595+ spin_unlock(&grsec_audit_lock); \
62596+ read_unlock(&grsec_exec_file_lock); \
62597+ read_unlock(&tasklist_lock); \
62598+ rcu_read_unlock(); \
62599+ ENABLE_PREEMPT(); \
62600+ if (x == GR_DONT_AUDIT) \
62601+ gr_handle_alertkill(current)
62602+
62603+enum {
62604+ FLOODING,
62605+ NO_FLOODING
62606+};
62607+
62608+extern char *gr_alert_log_fmt;
62609+extern char *gr_audit_log_fmt;
62610+extern char *gr_alert_log_buf;
62611+extern char *gr_audit_log_buf;
62612+
62613+static int gr_log_start(int audit)
62614+{
62615+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
62616+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
62617+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62618+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
62619+ unsigned long curr_secs = get_seconds();
62620+
62621+ if (audit == GR_DO_AUDIT)
62622+ goto set_fmt;
62623+
62624+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
62625+ grsec_alert_wtime = curr_secs;
62626+ grsec_alert_fyet = 0;
62627+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
62628+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
62629+ grsec_alert_fyet++;
62630+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
62631+ grsec_alert_wtime = curr_secs;
62632+ grsec_alert_fyet++;
62633+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
62634+ return FLOODING;
62635+ }
62636+ else return FLOODING;
62637+
62638+set_fmt:
62639+#endif
62640+ memset(buf, 0, PAGE_SIZE);
62641+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
62642+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
62643+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
62644+ } else if (current->signal->curr_ip) {
62645+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
62646+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
62647+ } else if (gr_acl_is_enabled()) {
62648+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
62649+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
62650+ } else {
62651+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
62652+ strcpy(buf, fmt);
62653+ }
62654+
62655+ return NO_FLOODING;
62656+}
62657+
62658+static void gr_log_middle(int audit, const char *msg, va_list ap)
62659+ __attribute__ ((format (printf, 2, 0)));
62660+
62661+static void gr_log_middle(int audit, const char *msg, va_list ap)
62662+{
62663+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62664+ unsigned int len = strlen(buf);
62665+
62666+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
62667+
62668+ return;
62669+}
62670+
62671+static void gr_log_middle_varargs(int audit, const char *msg, ...)
62672+ __attribute__ ((format (printf, 2, 3)));
62673+
62674+static void gr_log_middle_varargs(int audit, const char *msg, ...)
62675+{
62676+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62677+ unsigned int len = strlen(buf);
62678+ va_list ap;
62679+
62680+ va_start(ap, msg);
62681+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
62682+ va_end(ap);
62683+
62684+ return;
62685+}
62686+
62687+static void gr_log_end(int audit, int append_default)
62688+{
62689+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62690+
62691+ if (append_default) {
62692+ unsigned int len = strlen(buf);
62693+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
62694+ }
62695+
62696+ printk("%s\n", buf);
62697+
62698+ return;
62699+}
62700+
62701+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
62702+{
62703+ int logtype;
62704+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
62705+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
62706+ void *voidptr = NULL;
62707+ int num1 = 0, num2 = 0;
62708+ unsigned long ulong1 = 0, ulong2 = 0;
62709+ struct dentry *dentry = NULL;
62710+ struct vfsmount *mnt = NULL;
62711+ struct file *file = NULL;
62712+ struct task_struct *task = NULL;
62713+ const struct cred *cred, *pcred;
62714+ va_list ap;
62715+
62716+ BEGIN_LOCKS(audit);
62717+ logtype = gr_log_start(audit);
62718+ if (logtype == FLOODING) {
62719+ END_LOCKS(audit);
62720+ return;
62721+ }
62722+ va_start(ap, argtypes);
62723+ switch (argtypes) {
62724+ case GR_TTYSNIFF:
62725+ task = va_arg(ap, struct task_struct *);
62726+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
62727+ break;
62728+ case GR_SYSCTL_HIDDEN:
62729+ str1 = va_arg(ap, char *);
62730+ gr_log_middle_varargs(audit, msg, result, str1);
62731+ break;
62732+ case GR_RBAC:
62733+ dentry = va_arg(ap, struct dentry *);
62734+ mnt = va_arg(ap, struct vfsmount *);
62735+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
62736+ break;
62737+ case GR_RBAC_STR:
62738+ dentry = va_arg(ap, struct dentry *);
62739+ mnt = va_arg(ap, struct vfsmount *);
62740+ str1 = va_arg(ap, char *);
62741+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
62742+ break;
62743+ case GR_STR_RBAC:
62744+ str1 = va_arg(ap, char *);
62745+ dentry = va_arg(ap, struct dentry *);
62746+ mnt = va_arg(ap, struct vfsmount *);
62747+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
62748+ break;
62749+ case GR_RBAC_MODE2:
62750+ dentry = va_arg(ap, struct dentry *);
62751+ mnt = va_arg(ap, struct vfsmount *);
62752+ str1 = va_arg(ap, char *);
62753+ str2 = va_arg(ap, char *);
62754+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
62755+ break;
62756+ case GR_RBAC_MODE3:
62757+ dentry = va_arg(ap, struct dentry *);
62758+ mnt = va_arg(ap, struct vfsmount *);
62759+ str1 = va_arg(ap, char *);
62760+ str2 = va_arg(ap, char *);
62761+ str3 = va_arg(ap, char *);
62762+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
62763+ break;
62764+ case GR_FILENAME:
62765+ dentry = va_arg(ap, struct dentry *);
62766+ mnt = va_arg(ap, struct vfsmount *);
62767+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
62768+ break;
62769+ case GR_STR_FILENAME:
62770+ str1 = va_arg(ap, char *);
62771+ dentry = va_arg(ap, struct dentry *);
62772+ mnt = va_arg(ap, struct vfsmount *);
62773+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
62774+ break;
62775+ case GR_FILENAME_STR:
62776+ dentry = va_arg(ap, struct dentry *);
62777+ mnt = va_arg(ap, struct vfsmount *);
62778+ str1 = va_arg(ap, char *);
62779+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
62780+ break;
62781+ case GR_FILENAME_TWO_INT:
62782+ dentry = va_arg(ap, struct dentry *);
62783+ mnt = va_arg(ap, struct vfsmount *);
62784+ num1 = va_arg(ap, int);
62785+ num2 = va_arg(ap, int);
62786+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
62787+ break;
62788+ case GR_FILENAME_TWO_INT_STR:
62789+ dentry = va_arg(ap, struct dentry *);
62790+ mnt = va_arg(ap, struct vfsmount *);
62791+ num1 = va_arg(ap, int);
62792+ num2 = va_arg(ap, int);
62793+ str1 = va_arg(ap, char *);
62794+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
62795+ break;
62796+ case GR_TEXTREL:
62797+ file = va_arg(ap, struct file *);
62798+ ulong1 = va_arg(ap, unsigned long);
62799+ ulong2 = va_arg(ap, unsigned long);
62800+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
62801+ break;
62802+ case GR_PTRACE:
62803+ task = va_arg(ap, struct task_struct *);
62804+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
62805+ break;
62806+ case GR_RESOURCE:
62807+ task = va_arg(ap, struct task_struct *);
62808+ cred = __task_cred(task);
62809+ pcred = __task_cred(task->real_parent);
62810+ ulong1 = va_arg(ap, unsigned long);
62811+ str1 = va_arg(ap, char *);
62812+ ulong2 = va_arg(ap, unsigned long);
62813+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62814+ break;
62815+ case GR_CAP:
62816+ task = va_arg(ap, struct task_struct *);
62817+ cred = __task_cred(task);
62818+ pcred = __task_cred(task->real_parent);
62819+ str1 = va_arg(ap, char *);
62820+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62821+ break;
62822+ case GR_SIG:
62823+ str1 = va_arg(ap, char *);
62824+ voidptr = va_arg(ap, void *);
62825+ gr_log_middle_varargs(audit, msg, str1, voidptr);
62826+ break;
62827+ case GR_SIG2:
62828+ task = va_arg(ap, struct task_struct *);
62829+ cred = __task_cred(task);
62830+ pcred = __task_cred(task->real_parent);
62831+ num1 = va_arg(ap, int);
62832+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62833+ break;
62834+ case GR_CRASH1:
62835+ task = va_arg(ap, struct task_struct *);
62836+ cred = __task_cred(task);
62837+ pcred = __task_cred(task->real_parent);
62838+ ulong1 = va_arg(ap, unsigned long);
62839+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
62840+ break;
62841+ case GR_CRASH2:
62842+ task = va_arg(ap, struct task_struct *);
62843+ cred = __task_cred(task);
62844+ pcred = __task_cred(task->real_parent);
62845+ ulong1 = va_arg(ap, unsigned long);
62846+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
62847+ break;
62848+ case GR_RWXMAP:
62849+ file = va_arg(ap, struct file *);
62850+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
62851+ break;
62852+ case GR_PSACCT:
62853+ {
62854+ unsigned int wday, cday;
62855+ __u8 whr, chr;
62856+ __u8 wmin, cmin;
62857+ __u8 wsec, csec;
62858+ char cur_tty[64] = { 0 };
62859+ char parent_tty[64] = { 0 };
62860+
62861+ task = va_arg(ap, struct task_struct *);
62862+ wday = va_arg(ap, unsigned int);
62863+ cday = va_arg(ap, unsigned int);
62864+ whr = va_arg(ap, int);
62865+ chr = va_arg(ap, int);
62866+ wmin = va_arg(ap, int);
62867+ cmin = va_arg(ap, int);
62868+ wsec = va_arg(ap, int);
62869+ csec = va_arg(ap, int);
62870+ ulong1 = va_arg(ap, unsigned long);
62871+ cred = __task_cred(task);
62872+ pcred = __task_cred(task->real_parent);
62873+
62874+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
62875+ }
62876+ break;
62877+ default:
62878+ gr_log_middle(audit, msg, ap);
62879+ }
62880+ va_end(ap);
62881+ // these don't need DEFAULTSECARGS printed on the end
62882+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
62883+ gr_log_end(audit, 0);
62884+ else
62885+ gr_log_end(audit, 1);
62886+ END_LOCKS(audit);
62887+}
62888diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
62889new file mode 100644
62890index 0000000..6c0416b
62891--- /dev/null
62892+++ b/grsecurity/grsec_mem.c
62893@@ -0,0 +1,33 @@
62894+#include <linux/kernel.h>
62895+#include <linux/sched.h>
62896+#include <linux/mm.h>
62897+#include <linux/mman.h>
62898+#include <linux/grinternal.h>
62899+
62900+void
62901+gr_handle_ioperm(void)
62902+{
62903+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
62904+ return;
62905+}
62906+
62907+void
62908+gr_handle_iopl(void)
62909+{
62910+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
62911+ return;
62912+}
62913+
62914+void
62915+gr_handle_mem_readwrite(u64 from, u64 to)
62916+{
62917+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
62918+ return;
62919+}
62920+
62921+void
62922+gr_handle_vm86(void)
62923+{
62924+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
62925+ return;
62926+}
62927diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
62928new file mode 100644
62929index 0000000..2131422
62930--- /dev/null
62931+++ b/grsecurity/grsec_mount.c
62932@@ -0,0 +1,62 @@
62933+#include <linux/kernel.h>
62934+#include <linux/sched.h>
62935+#include <linux/mount.h>
62936+#include <linux/grsecurity.h>
62937+#include <linux/grinternal.h>
62938+
62939+void
62940+gr_log_remount(const char *devname, const int retval)
62941+{
62942+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62943+ if (grsec_enable_mount && (retval >= 0))
62944+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
62945+#endif
62946+ return;
62947+}
62948+
62949+void
62950+gr_log_unmount(const char *devname, const int retval)
62951+{
62952+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62953+ if (grsec_enable_mount && (retval >= 0))
62954+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
62955+#endif
62956+ return;
62957+}
62958+
62959+void
62960+gr_log_mount(const char *from, const char *to, const int retval)
62961+{
62962+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62963+ if (grsec_enable_mount && (retval >= 0))
62964+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
62965+#endif
62966+ return;
62967+}
62968+
62969+int
62970+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
62971+{
62972+#ifdef CONFIG_GRKERNSEC_ROFS
62973+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
62974+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
62975+ return -EPERM;
62976+ } else
62977+ return 0;
62978+#endif
62979+ return 0;
62980+}
62981+
62982+int
62983+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
62984+{
62985+#ifdef CONFIG_GRKERNSEC_ROFS
62986+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
62987+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
62988+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
62989+ return -EPERM;
62990+ } else
62991+ return 0;
62992+#endif
62993+ return 0;
62994+}
62995diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
62996new file mode 100644
62997index 0000000..a3b12a0
62998--- /dev/null
62999+++ b/grsecurity/grsec_pax.c
63000@@ -0,0 +1,36 @@
63001+#include <linux/kernel.h>
63002+#include <linux/sched.h>
63003+#include <linux/mm.h>
63004+#include <linux/file.h>
63005+#include <linux/grinternal.h>
63006+#include <linux/grsecurity.h>
63007+
63008+void
63009+gr_log_textrel(struct vm_area_struct * vma)
63010+{
63011+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63012+ if (grsec_enable_audit_textrel)
63013+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63014+#endif
63015+ return;
63016+}
63017+
63018+void
63019+gr_log_rwxmmap(struct file *file)
63020+{
63021+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63022+ if (grsec_enable_log_rwxmaps)
63023+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63024+#endif
63025+ return;
63026+}
63027+
63028+void
63029+gr_log_rwxmprotect(struct file *file)
63030+{
63031+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63032+ if (grsec_enable_log_rwxmaps)
63033+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63034+#endif
63035+ return;
63036+}
63037diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63038new file mode 100644
63039index 0000000..472c1d6
63040--- /dev/null
63041+++ b/grsecurity/grsec_ptrace.c
63042@@ -0,0 +1,14 @@
63043+#include <linux/kernel.h>
63044+#include <linux/sched.h>
63045+#include <linux/grinternal.h>
63046+#include <linux/grsecurity.h>
63047+
63048+void
63049+gr_audit_ptrace(struct task_struct *task)
63050+{
63051+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63052+ if (grsec_enable_audit_ptrace)
63053+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63054+#endif
63055+ return;
63056+}
63057diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63058new file mode 100644
63059index 0000000..dc73fe9
63060--- /dev/null
63061+++ b/grsecurity/grsec_sig.c
63062@@ -0,0 +1,205 @@
63063+#include <linux/kernel.h>
63064+#include <linux/sched.h>
63065+#include <linux/delay.h>
63066+#include <linux/grsecurity.h>
63067+#include <linux/grinternal.h>
63068+#include <linux/hardirq.h>
63069+
63070+char *signames[] = {
63071+ [SIGSEGV] = "Segmentation fault",
63072+ [SIGILL] = "Illegal instruction",
63073+ [SIGABRT] = "Abort",
63074+ [SIGBUS] = "Invalid alignment/Bus error"
63075+};
63076+
63077+void
63078+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63079+{
63080+#ifdef CONFIG_GRKERNSEC_SIGNAL
63081+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63082+ (sig == SIGABRT) || (sig == SIGBUS))) {
63083+ if (t->pid == current->pid) {
63084+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63085+ } else {
63086+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63087+ }
63088+ }
63089+#endif
63090+ return;
63091+}
63092+
63093+int
63094+gr_handle_signal(const struct task_struct *p, const int sig)
63095+{
63096+#ifdef CONFIG_GRKERNSEC
63097+ if (current->pid > 1 && gr_check_protected_task(p)) {
63098+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63099+ return -EPERM;
63100+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63101+ return -EPERM;
63102+ }
63103+#endif
63104+ return 0;
63105+}
63106+
63107+#ifdef CONFIG_GRKERNSEC
63108+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63109+
63110+int gr_fake_force_sig(int sig, struct task_struct *t)
63111+{
63112+ unsigned long int flags;
63113+ int ret, blocked, ignored;
63114+ struct k_sigaction *action;
63115+
63116+ spin_lock_irqsave(&t->sighand->siglock, flags);
63117+ action = &t->sighand->action[sig-1];
63118+ ignored = action->sa.sa_handler == SIG_IGN;
63119+ blocked = sigismember(&t->blocked, sig);
63120+ if (blocked || ignored) {
63121+ action->sa.sa_handler = SIG_DFL;
63122+ if (blocked) {
63123+ sigdelset(&t->blocked, sig);
63124+ recalc_sigpending_and_wake(t);
63125+ }
63126+ }
63127+ if (action->sa.sa_handler == SIG_DFL)
63128+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
63129+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
63130+
63131+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
63132+
63133+ return ret;
63134+}
63135+#endif
63136+
63137+#ifdef CONFIG_GRKERNSEC_BRUTE
63138+#define GR_USER_BAN_TIME (15 * 60)
63139+
63140+static int __get_dumpable(unsigned long mm_flags)
63141+{
63142+ int ret;
63143+
63144+ ret = mm_flags & MMF_DUMPABLE_MASK;
63145+ return (ret >= 2) ? 2 : ret;
63146+}
63147+#endif
63148+
63149+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
63150+{
63151+#ifdef CONFIG_GRKERNSEC_BRUTE
63152+ uid_t uid = 0;
63153+
63154+ if (!grsec_enable_brute)
63155+ return;
63156+
63157+ rcu_read_lock();
63158+ read_lock(&tasklist_lock);
63159+ read_lock(&grsec_exec_file_lock);
63160+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
63161+ p->real_parent->brute = 1;
63162+ else {
63163+ const struct cred *cred = __task_cred(p), *cred2;
63164+ struct task_struct *tsk, *tsk2;
63165+
63166+ if (!__get_dumpable(mm_flags) && cred->uid) {
63167+ struct user_struct *user;
63168+
63169+ uid = cred->uid;
63170+
63171+ /* this is put upon execution past expiration */
63172+ user = find_user(uid);
63173+ if (user == NULL)
63174+ goto unlock;
63175+ user->banned = 1;
63176+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
63177+ if (user->ban_expires == ~0UL)
63178+ user->ban_expires--;
63179+
63180+ do_each_thread(tsk2, tsk) {
63181+ cred2 = __task_cred(tsk);
63182+ if (tsk != p && cred2->uid == uid)
63183+ gr_fake_force_sig(SIGKILL, tsk);
63184+ } while_each_thread(tsk2, tsk);
63185+ }
63186+ }
63187+unlock:
63188+ read_unlock(&grsec_exec_file_lock);
63189+ read_unlock(&tasklist_lock);
63190+ rcu_read_unlock();
63191+
63192+ if (uid)
63193+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
63194+#endif
63195+ return;
63196+}
63197+
63198+void gr_handle_brute_check(void)
63199+{
63200+#ifdef CONFIG_GRKERNSEC_BRUTE
63201+ if (current->brute)
63202+ msleep(30 * 1000);
63203+#endif
63204+ return;
63205+}
63206+
63207+void gr_handle_kernel_exploit(void)
63208+{
63209+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
63210+ const struct cred *cred;
63211+ struct task_struct *tsk, *tsk2;
63212+ struct user_struct *user;
63213+ uid_t uid;
63214+
63215+ if (in_irq() || in_serving_softirq() || in_nmi())
63216+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
63217+
63218+ uid = current_uid();
63219+
63220+ if (uid == 0)
63221+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
63222+ else {
63223+ /* kill all the processes of this user, hold a reference
63224+ to their creds struct, and prevent them from creating
63225+ another process until system reset
63226+ */
63227+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
63228+ /* we intentionally leak this ref */
63229+ user = get_uid(current->cred->user);
63230+ if (user) {
63231+ user->banned = 1;
63232+ user->ban_expires = ~0UL;
63233+ }
63234+
63235+ read_lock(&tasklist_lock);
63236+ do_each_thread(tsk2, tsk) {
63237+ cred = __task_cred(tsk);
63238+ if (cred->uid == uid)
63239+ gr_fake_force_sig(SIGKILL, tsk);
63240+ } while_each_thread(tsk2, tsk);
63241+ read_unlock(&tasklist_lock);
63242+ }
63243+#endif
63244+}
63245+
63246+int __gr_process_user_ban(struct user_struct *user)
63247+{
63248+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63249+ if (unlikely(user->banned)) {
63250+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
63251+ user->banned = 0;
63252+ user->ban_expires = 0;
63253+ free_uid(user);
63254+ } else
63255+ return -EPERM;
63256+ }
63257+#endif
63258+ return 0;
63259+}
63260+
63261+int gr_process_user_ban(void)
63262+{
63263+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63264+ return __gr_process_user_ban(current->cred->user);
63265+#endif
63266+ return 0;
63267+}
63268diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
63269new file mode 100644
63270index 0000000..7512ea9
63271--- /dev/null
63272+++ b/grsecurity/grsec_sock.c
63273@@ -0,0 +1,275 @@
63274+#include <linux/kernel.h>
63275+#include <linux/module.h>
63276+#include <linux/sched.h>
63277+#include <linux/file.h>
63278+#include <linux/net.h>
63279+#include <linux/in.h>
63280+#include <linux/ip.h>
63281+#include <net/sock.h>
63282+#include <net/inet_sock.h>
63283+#include <linux/grsecurity.h>
63284+#include <linux/grinternal.h>
63285+#include <linux/gracl.h>
63286+
63287+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
63288+EXPORT_SYMBOL(gr_cap_rtnetlink);
63289+
63290+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
63291+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
63292+
63293+EXPORT_SYMBOL(gr_search_udp_recvmsg);
63294+EXPORT_SYMBOL(gr_search_udp_sendmsg);
63295+
63296+#ifdef CONFIG_UNIX_MODULE
63297+EXPORT_SYMBOL(gr_acl_handle_unix);
63298+EXPORT_SYMBOL(gr_acl_handle_mknod);
63299+EXPORT_SYMBOL(gr_handle_chroot_unix);
63300+EXPORT_SYMBOL(gr_handle_create);
63301+#endif
63302+
63303+#ifdef CONFIG_GRKERNSEC
63304+#define gr_conn_table_size 32749
63305+struct conn_table_entry {
63306+ struct conn_table_entry *next;
63307+ struct signal_struct *sig;
63308+};
63309+
63310+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
63311+DEFINE_SPINLOCK(gr_conn_table_lock);
63312+
63313+extern const char * gr_socktype_to_name(unsigned char type);
63314+extern const char * gr_proto_to_name(unsigned char proto);
63315+extern const char * gr_sockfamily_to_name(unsigned char family);
63316+
63317+static __inline__ int
63318+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
63319+{
63320+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
63321+}
63322+
63323+static __inline__ int
63324+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
63325+ __u16 sport, __u16 dport)
63326+{
63327+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
63328+ sig->gr_sport == sport && sig->gr_dport == dport))
63329+ return 1;
63330+ else
63331+ return 0;
63332+}
63333+
63334+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
63335+{
63336+ struct conn_table_entry **match;
63337+ unsigned int index;
63338+
63339+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63340+ sig->gr_sport, sig->gr_dport,
63341+ gr_conn_table_size);
63342+
63343+ newent->sig = sig;
63344+
63345+ match = &gr_conn_table[index];
63346+ newent->next = *match;
63347+ *match = newent;
63348+
63349+ return;
63350+}
63351+
63352+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
63353+{
63354+ struct conn_table_entry *match, *last = NULL;
63355+ unsigned int index;
63356+
63357+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63358+ sig->gr_sport, sig->gr_dport,
63359+ gr_conn_table_size);
63360+
63361+ match = gr_conn_table[index];
63362+ while (match && !conn_match(match->sig,
63363+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
63364+ sig->gr_dport)) {
63365+ last = match;
63366+ match = match->next;
63367+ }
63368+
63369+ if (match) {
63370+ if (last)
63371+ last->next = match->next;
63372+ else
63373+ gr_conn_table[index] = NULL;
63374+ kfree(match);
63375+ }
63376+
63377+ return;
63378+}
63379+
63380+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
63381+ __u16 sport, __u16 dport)
63382+{
63383+ struct conn_table_entry *match;
63384+ unsigned int index;
63385+
63386+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
63387+
63388+ match = gr_conn_table[index];
63389+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
63390+ match = match->next;
63391+
63392+ if (match)
63393+ return match->sig;
63394+ else
63395+ return NULL;
63396+}
63397+
63398+#endif
63399+
63400+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
63401+{
63402+#ifdef CONFIG_GRKERNSEC
63403+ struct signal_struct *sig = task->signal;
63404+ struct conn_table_entry *newent;
63405+
63406+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
63407+ if (newent == NULL)
63408+ return;
63409+ /* no bh lock needed since we are called with bh disabled */
63410+ spin_lock(&gr_conn_table_lock);
63411+ gr_del_task_from_ip_table_nolock(sig);
63412+ sig->gr_saddr = inet->rcv_saddr;
63413+ sig->gr_daddr = inet->daddr;
63414+ sig->gr_sport = inet->sport;
63415+ sig->gr_dport = inet->dport;
63416+ gr_add_to_task_ip_table_nolock(sig, newent);
63417+ spin_unlock(&gr_conn_table_lock);
63418+#endif
63419+ return;
63420+}
63421+
63422+void gr_del_task_from_ip_table(struct task_struct *task)
63423+{
63424+#ifdef CONFIG_GRKERNSEC
63425+ spin_lock_bh(&gr_conn_table_lock);
63426+ gr_del_task_from_ip_table_nolock(task->signal);
63427+ spin_unlock_bh(&gr_conn_table_lock);
63428+#endif
63429+ return;
63430+}
63431+
63432+void
63433+gr_attach_curr_ip(const struct sock *sk)
63434+{
63435+#ifdef CONFIG_GRKERNSEC
63436+ struct signal_struct *p, *set;
63437+ const struct inet_sock *inet = inet_sk(sk);
63438+
63439+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
63440+ return;
63441+
63442+ set = current->signal;
63443+
63444+ spin_lock_bh(&gr_conn_table_lock);
63445+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
63446+ inet->dport, inet->sport);
63447+ if (unlikely(p != NULL)) {
63448+ set->curr_ip = p->curr_ip;
63449+ set->used_accept = 1;
63450+ gr_del_task_from_ip_table_nolock(p);
63451+ spin_unlock_bh(&gr_conn_table_lock);
63452+ return;
63453+ }
63454+ spin_unlock_bh(&gr_conn_table_lock);
63455+
63456+ set->curr_ip = inet->daddr;
63457+ set->used_accept = 1;
63458+#endif
63459+ return;
63460+}
63461+
63462+int
63463+gr_handle_sock_all(const int family, const int type, const int protocol)
63464+{
63465+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63466+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
63467+ (family != AF_UNIX)) {
63468+ if (family == AF_INET)
63469+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
63470+ else
63471+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
63472+ return -EACCES;
63473+ }
63474+#endif
63475+ return 0;
63476+}
63477+
63478+int
63479+gr_handle_sock_server(const struct sockaddr *sck)
63480+{
63481+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63482+ if (grsec_enable_socket_server &&
63483+ in_group_p(grsec_socket_server_gid) &&
63484+ sck && (sck->sa_family != AF_UNIX) &&
63485+ (sck->sa_family != AF_LOCAL)) {
63486+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
63487+ return -EACCES;
63488+ }
63489+#endif
63490+ return 0;
63491+}
63492+
63493+int
63494+gr_handle_sock_server_other(const struct sock *sck)
63495+{
63496+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63497+ if (grsec_enable_socket_server &&
63498+ in_group_p(grsec_socket_server_gid) &&
63499+ sck && (sck->sk_family != AF_UNIX) &&
63500+ (sck->sk_family != AF_LOCAL)) {
63501+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
63502+ return -EACCES;
63503+ }
63504+#endif
63505+ return 0;
63506+}
63507+
63508+int
63509+gr_handle_sock_client(const struct sockaddr *sck)
63510+{
63511+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63512+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
63513+ sck && (sck->sa_family != AF_UNIX) &&
63514+ (sck->sa_family != AF_LOCAL)) {
63515+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
63516+ return -EACCES;
63517+ }
63518+#endif
63519+ return 0;
63520+}
63521+
63522+kernel_cap_t
63523+gr_cap_rtnetlink(struct sock *sock)
63524+{
63525+#ifdef CONFIG_GRKERNSEC
63526+ if (!gr_acl_is_enabled())
63527+ return current_cap();
63528+ else if (sock->sk_protocol == NETLINK_ISCSI &&
63529+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
63530+ gr_is_capable(CAP_SYS_ADMIN))
63531+ return current_cap();
63532+ else if (sock->sk_protocol == NETLINK_AUDIT &&
63533+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
63534+ gr_is_capable(CAP_AUDIT_WRITE) &&
63535+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
63536+ gr_is_capable(CAP_AUDIT_CONTROL))
63537+ return current_cap();
63538+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
63539+ ((sock->sk_protocol == NETLINK_ROUTE) ?
63540+ gr_is_capable_nolog(CAP_NET_ADMIN) :
63541+ gr_is_capable(CAP_NET_ADMIN)))
63542+ return current_cap();
63543+ else
63544+ return __cap_empty_set;
63545+#else
63546+ return current_cap();
63547+#endif
63548+}
63549diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
63550new file mode 100644
63551index 0000000..2753505
63552--- /dev/null
63553+++ b/grsecurity/grsec_sysctl.c
63554@@ -0,0 +1,479 @@
63555+#include <linux/kernel.h>
63556+#include <linux/sched.h>
63557+#include <linux/sysctl.h>
63558+#include <linux/grsecurity.h>
63559+#include <linux/grinternal.h>
63560+
63561+int
63562+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
63563+{
63564+#ifdef CONFIG_GRKERNSEC_SYSCTL
63565+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
63566+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
63567+ return -EACCES;
63568+ }
63569+#endif
63570+ return 0;
63571+}
63572+
63573+#ifdef CONFIG_GRKERNSEC_ROFS
63574+static int __maybe_unused one = 1;
63575+#endif
63576+
63577+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
63578+ctl_table grsecurity_table[] = {
63579+#ifdef CONFIG_GRKERNSEC_SYSCTL
63580+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
63581+#ifdef CONFIG_GRKERNSEC_IO
63582+ {
63583+ .ctl_name = CTL_UNNUMBERED,
63584+ .procname = "disable_priv_io",
63585+ .data = &grsec_disable_privio,
63586+ .maxlen = sizeof(int),
63587+ .mode = 0600,
63588+ .proc_handler = &proc_dointvec,
63589+ },
63590+#endif
63591+#endif
63592+#ifdef CONFIG_GRKERNSEC_LINK
63593+ {
63594+ .ctl_name = CTL_UNNUMBERED,
63595+ .procname = "linking_restrictions",
63596+ .data = &grsec_enable_link,
63597+ .maxlen = sizeof(int),
63598+ .mode = 0600,
63599+ .proc_handler = &proc_dointvec,
63600+ },
63601+#endif
63602+#ifdef CONFIG_GRKERNSEC_BRUTE
63603+ {
63604+ .ctl_name = CTL_UNNUMBERED,
63605+ .procname = "deter_bruteforce",
63606+ .data = &grsec_enable_brute,
63607+ .maxlen = sizeof(int),
63608+ .mode = 0600,
63609+ .proc_handler = &proc_dointvec,
63610+ },
63611+#endif
63612+#ifdef CONFIG_GRKERNSEC_FIFO
63613+ {
63614+ .ctl_name = CTL_UNNUMBERED,
63615+ .procname = "fifo_restrictions",
63616+ .data = &grsec_enable_fifo,
63617+ .maxlen = sizeof(int),
63618+ .mode = 0600,
63619+ .proc_handler = &proc_dointvec,
63620+ },
63621+#endif
63622+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63623+ {
63624+ .ctl_name = CTL_UNNUMBERED,
63625+ .procname = "ip_blackhole",
63626+ .data = &grsec_enable_blackhole,
63627+ .maxlen = sizeof(int),
63628+ .mode = 0600,
63629+ .proc_handler = &proc_dointvec,
63630+ },
63631+ {
63632+ .ctl_name = CTL_UNNUMBERED,
63633+ .procname = "lastack_retries",
63634+ .data = &grsec_lastack_retries,
63635+ .maxlen = sizeof(int),
63636+ .mode = 0600,
63637+ .proc_handler = &proc_dointvec,
63638+ },
63639+#endif
63640+#ifdef CONFIG_GRKERNSEC_EXECLOG
63641+ {
63642+ .ctl_name = CTL_UNNUMBERED,
63643+ .procname = "exec_logging",
63644+ .data = &grsec_enable_execlog,
63645+ .maxlen = sizeof(int),
63646+ .mode = 0600,
63647+ .proc_handler = &proc_dointvec,
63648+ },
63649+#endif
63650+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63651+ {
63652+ .ctl_name = CTL_UNNUMBERED,
63653+ .procname = "rwxmap_logging",
63654+ .data = &grsec_enable_log_rwxmaps,
63655+ .maxlen = sizeof(int),
63656+ .mode = 0600,
63657+ .proc_handler = &proc_dointvec,
63658+ },
63659+#endif
63660+#ifdef CONFIG_GRKERNSEC_SIGNAL
63661+ {
63662+ .ctl_name = CTL_UNNUMBERED,
63663+ .procname = "signal_logging",
63664+ .data = &grsec_enable_signal,
63665+ .maxlen = sizeof(int),
63666+ .mode = 0600,
63667+ .proc_handler = &proc_dointvec,
63668+ },
63669+#endif
63670+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63671+ {
63672+ .ctl_name = CTL_UNNUMBERED,
63673+ .procname = "forkfail_logging",
63674+ .data = &grsec_enable_forkfail,
63675+ .maxlen = sizeof(int),
63676+ .mode = 0600,
63677+ .proc_handler = &proc_dointvec,
63678+ },
63679+#endif
63680+#ifdef CONFIG_GRKERNSEC_TIME
63681+ {
63682+ .ctl_name = CTL_UNNUMBERED,
63683+ .procname = "timechange_logging",
63684+ .data = &grsec_enable_time,
63685+ .maxlen = sizeof(int),
63686+ .mode = 0600,
63687+ .proc_handler = &proc_dointvec,
63688+ },
63689+#endif
63690+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63691+ {
63692+ .ctl_name = CTL_UNNUMBERED,
63693+ .procname = "chroot_deny_shmat",
63694+ .data = &grsec_enable_chroot_shmat,
63695+ .maxlen = sizeof(int),
63696+ .mode = 0600,
63697+ .proc_handler = &proc_dointvec,
63698+ },
63699+#endif
63700+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63701+ {
63702+ .ctl_name = CTL_UNNUMBERED,
63703+ .procname = "chroot_deny_unix",
63704+ .data = &grsec_enable_chroot_unix,
63705+ .maxlen = sizeof(int),
63706+ .mode = 0600,
63707+ .proc_handler = &proc_dointvec,
63708+ },
63709+#endif
63710+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63711+ {
63712+ .ctl_name = CTL_UNNUMBERED,
63713+ .procname = "chroot_deny_mount",
63714+ .data = &grsec_enable_chroot_mount,
63715+ .maxlen = sizeof(int),
63716+ .mode = 0600,
63717+ .proc_handler = &proc_dointvec,
63718+ },
63719+#endif
63720+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63721+ {
63722+ .ctl_name = CTL_UNNUMBERED,
63723+ .procname = "chroot_deny_fchdir",
63724+ .data = &grsec_enable_chroot_fchdir,
63725+ .maxlen = sizeof(int),
63726+ .mode = 0600,
63727+ .proc_handler = &proc_dointvec,
63728+ },
63729+#endif
63730+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63731+ {
63732+ .ctl_name = CTL_UNNUMBERED,
63733+ .procname = "chroot_deny_chroot",
63734+ .data = &grsec_enable_chroot_double,
63735+ .maxlen = sizeof(int),
63736+ .mode = 0600,
63737+ .proc_handler = &proc_dointvec,
63738+ },
63739+#endif
63740+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63741+ {
63742+ .ctl_name = CTL_UNNUMBERED,
63743+ .procname = "chroot_deny_pivot",
63744+ .data = &grsec_enable_chroot_pivot,
63745+ .maxlen = sizeof(int),
63746+ .mode = 0600,
63747+ .proc_handler = &proc_dointvec,
63748+ },
63749+#endif
63750+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63751+ {
63752+ .ctl_name = CTL_UNNUMBERED,
63753+ .procname = "chroot_enforce_chdir",
63754+ .data = &grsec_enable_chroot_chdir,
63755+ .maxlen = sizeof(int),
63756+ .mode = 0600,
63757+ .proc_handler = &proc_dointvec,
63758+ },
63759+#endif
63760+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63761+ {
63762+ .ctl_name = CTL_UNNUMBERED,
63763+ .procname = "chroot_deny_chmod",
63764+ .data = &grsec_enable_chroot_chmod,
63765+ .maxlen = sizeof(int),
63766+ .mode = 0600,
63767+ .proc_handler = &proc_dointvec,
63768+ },
63769+#endif
63770+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63771+ {
63772+ .ctl_name = CTL_UNNUMBERED,
63773+ .procname = "chroot_deny_mknod",
63774+ .data = &grsec_enable_chroot_mknod,
63775+ .maxlen = sizeof(int),
63776+ .mode = 0600,
63777+ .proc_handler = &proc_dointvec,
63778+ },
63779+#endif
63780+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63781+ {
63782+ .ctl_name = CTL_UNNUMBERED,
63783+ .procname = "chroot_restrict_nice",
63784+ .data = &grsec_enable_chroot_nice,
63785+ .maxlen = sizeof(int),
63786+ .mode = 0600,
63787+ .proc_handler = &proc_dointvec,
63788+ },
63789+#endif
63790+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63791+ {
63792+ .ctl_name = CTL_UNNUMBERED,
63793+ .procname = "chroot_execlog",
63794+ .data = &grsec_enable_chroot_execlog,
63795+ .maxlen = sizeof(int),
63796+ .mode = 0600,
63797+ .proc_handler = &proc_dointvec,
63798+ },
63799+#endif
63800+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63801+ {
63802+ .ctl_name = CTL_UNNUMBERED,
63803+ .procname = "chroot_caps",
63804+ .data = &grsec_enable_chroot_caps,
63805+ .maxlen = sizeof(int),
63806+ .mode = 0600,
63807+ .proc_handler = &proc_dointvec,
63808+ },
63809+#endif
63810+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63811+ {
63812+ .ctl_name = CTL_UNNUMBERED,
63813+ .procname = "chroot_deny_sysctl",
63814+ .data = &grsec_enable_chroot_sysctl,
63815+ .maxlen = sizeof(int),
63816+ .mode = 0600,
63817+ .proc_handler = &proc_dointvec,
63818+ },
63819+#endif
63820+#ifdef CONFIG_GRKERNSEC_TPE
63821+ {
63822+ .ctl_name = CTL_UNNUMBERED,
63823+ .procname = "tpe",
63824+ .data = &grsec_enable_tpe,
63825+ .maxlen = sizeof(int),
63826+ .mode = 0600,
63827+ .proc_handler = &proc_dointvec,
63828+ },
63829+ {
63830+ .ctl_name = CTL_UNNUMBERED,
63831+ .procname = "tpe_gid",
63832+ .data = &grsec_tpe_gid,
63833+ .maxlen = sizeof(int),
63834+ .mode = 0600,
63835+ .proc_handler = &proc_dointvec,
63836+ },
63837+#endif
63838+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63839+ {
63840+ .ctl_name = CTL_UNNUMBERED,
63841+ .procname = "tpe_invert",
63842+ .data = &grsec_enable_tpe_invert,
63843+ .maxlen = sizeof(int),
63844+ .mode = 0600,
63845+ .proc_handler = &proc_dointvec,
63846+ },
63847+#endif
63848+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63849+ {
63850+ .ctl_name = CTL_UNNUMBERED,
63851+ .procname = "tpe_restrict_all",
63852+ .data = &grsec_enable_tpe_all,
63853+ .maxlen = sizeof(int),
63854+ .mode = 0600,
63855+ .proc_handler = &proc_dointvec,
63856+ },
63857+#endif
63858+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63859+ {
63860+ .ctl_name = CTL_UNNUMBERED,
63861+ .procname = "socket_all",
63862+ .data = &grsec_enable_socket_all,
63863+ .maxlen = sizeof(int),
63864+ .mode = 0600,
63865+ .proc_handler = &proc_dointvec,
63866+ },
63867+ {
63868+ .ctl_name = CTL_UNNUMBERED,
63869+ .procname = "socket_all_gid",
63870+ .data = &grsec_socket_all_gid,
63871+ .maxlen = sizeof(int),
63872+ .mode = 0600,
63873+ .proc_handler = &proc_dointvec,
63874+ },
63875+#endif
63876+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63877+ {
63878+ .ctl_name = CTL_UNNUMBERED,
63879+ .procname = "socket_client",
63880+ .data = &grsec_enable_socket_client,
63881+ .maxlen = sizeof(int),
63882+ .mode = 0600,
63883+ .proc_handler = &proc_dointvec,
63884+ },
63885+ {
63886+ .ctl_name = CTL_UNNUMBERED,
63887+ .procname = "socket_client_gid",
63888+ .data = &grsec_socket_client_gid,
63889+ .maxlen = sizeof(int),
63890+ .mode = 0600,
63891+ .proc_handler = &proc_dointvec,
63892+ },
63893+#endif
63894+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63895+ {
63896+ .ctl_name = CTL_UNNUMBERED,
63897+ .procname = "socket_server",
63898+ .data = &grsec_enable_socket_server,
63899+ .maxlen = sizeof(int),
63900+ .mode = 0600,
63901+ .proc_handler = &proc_dointvec,
63902+ },
63903+ {
63904+ .ctl_name = CTL_UNNUMBERED,
63905+ .procname = "socket_server_gid",
63906+ .data = &grsec_socket_server_gid,
63907+ .maxlen = sizeof(int),
63908+ .mode = 0600,
63909+ .proc_handler = &proc_dointvec,
63910+ },
63911+#endif
63912+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63913+ {
63914+ .ctl_name = CTL_UNNUMBERED,
63915+ .procname = "audit_group",
63916+ .data = &grsec_enable_group,
63917+ .maxlen = sizeof(int),
63918+ .mode = 0600,
63919+ .proc_handler = &proc_dointvec,
63920+ },
63921+ {
63922+ .ctl_name = CTL_UNNUMBERED,
63923+ .procname = "audit_gid",
63924+ .data = &grsec_audit_gid,
63925+ .maxlen = sizeof(int),
63926+ .mode = 0600,
63927+ .proc_handler = &proc_dointvec,
63928+ },
63929+#endif
63930+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63931+ {
63932+ .ctl_name = CTL_UNNUMBERED,
63933+ .procname = "audit_chdir",
63934+ .data = &grsec_enable_chdir,
63935+ .maxlen = sizeof(int),
63936+ .mode = 0600,
63937+ .proc_handler = &proc_dointvec,
63938+ },
63939+#endif
63940+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63941+ {
63942+ .ctl_name = CTL_UNNUMBERED,
63943+ .procname = "audit_mount",
63944+ .data = &grsec_enable_mount,
63945+ .maxlen = sizeof(int),
63946+ .mode = 0600,
63947+ .proc_handler = &proc_dointvec,
63948+ },
63949+#endif
63950+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63951+ {
63952+ .ctl_name = CTL_UNNUMBERED,
63953+ .procname = "audit_textrel",
63954+ .data = &grsec_enable_audit_textrel,
63955+ .maxlen = sizeof(int),
63956+ .mode = 0600,
63957+ .proc_handler = &proc_dointvec,
63958+ },
63959+#endif
63960+#ifdef CONFIG_GRKERNSEC_DMESG
63961+ {
63962+ .ctl_name = CTL_UNNUMBERED,
63963+ .procname = "dmesg",
63964+ .data = &grsec_enable_dmesg,
63965+ .maxlen = sizeof(int),
63966+ .mode = 0600,
63967+ .proc_handler = &proc_dointvec,
63968+ },
63969+#endif
63970+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63971+ {
63972+ .ctl_name = CTL_UNNUMBERED,
63973+ .procname = "chroot_findtask",
63974+ .data = &grsec_enable_chroot_findtask,
63975+ .maxlen = sizeof(int),
63976+ .mode = 0600,
63977+ .proc_handler = &proc_dointvec,
63978+ },
63979+#endif
63980+#ifdef CONFIG_GRKERNSEC_RESLOG
63981+ {
63982+ .ctl_name = CTL_UNNUMBERED,
63983+ .procname = "resource_logging",
63984+ .data = &grsec_resource_logging,
63985+ .maxlen = sizeof(int),
63986+ .mode = 0600,
63987+ .proc_handler = &proc_dointvec,
63988+ },
63989+#endif
63990+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63991+ {
63992+ .ctl_name = CTL_UNNUMBERED,
63993+ .procname = "audit_ptrace",
63994+ .data = &grsec_enable_audit_ptrace,
63995+ .maxlen = sizeof(int),
63996+ .mode = 0600,
63997+ .proc_handler = &proc_dointvec,
63998+ },
63999+#endif
64000+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64001+ {
64002+ .ctl_name = CTL_UNNUMBERED,
64003+ .procname = "harden_ptrace",
64004+ .data = &grsec_enable_harden_ptrace,
64005+ .maxlen = sizeof(int),
64006+ .mode = 0600,
64007+ .proc_handler = &proc_dointvec,
64008+ },
64009+#endif
64010+ {
64011+ .ctl_name = CTL_UNNUMBERED,
64012+ .procname = "grsec_lock",
64013+ .data = &grsec_lock,
64014+ .maxlen = sizeof(int),
64015+ .mode = 0600,
64016+ .proc_handler = &proc_dointvec,
64017+ },
64018+#endif
64019+#ifdef CONFIG_GRKERNSEC_ROFS
64020+ {
64021+ .ctl_name = CTL_UNNUMBERED,
64022+ .procname = "romount_protect",
64023+ .data = &grsec_enable_rofs,
64024+ .maxlen = sizeof(int),
64025+ .mode = 0600,
64026+ .proc_handler = &proc_dointvec_minmax,
64027+ .extra1 = &one,
64028+ .extra2 = &one,
64029+ },
64030+#endif
64031+ { .ctl_name = 0 }
64032+};
64033+#endif
64034diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64035new file mode 100644
64036index 0000000..0dc13c3
64037--- /dev/null
64038+++ b/grsecurity/grsec_time.c
64039@@ -0,0 +1,16 @@
64040+#include <linux/kernel.h>
64041+#include <linux/sched.h>
64042+#include <linux/grinternal.h>
64043+#include <linux/module.h>
64044+
64045+void
64046+gr_log_timechange(void)
64047+{
64048+#ifdef CONFIG_GRKERNSEC_TIME
64049+ if (grsec_enable_time)
64050+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64051+#endif
64052+ return;
64053+}
64054+
64055+EXPORT_SYMBOL(gr_log_timechange);
64056diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64057new file mode 100644
64058index 0000000..4a78774
64059--- /dev/null
64060+++ b/grsecurity/grsec_tpe.c
64061@@ -0,0 +1,39 @@
64062+#include <linux/kernel.h>
64063+#include <linux/sched.h>
64064+#include <linux/file.h>
64065+#include <linux/fs.h>
64066+#include <linux/grinternal.h>
64067+
64068+extern int gr_acl_tpe_check(void);
64069+
64070+int
64071+gr_tpe_allow(const struct file *file)
64072+{
64073+#ifdef CONFIG_GRKERNSEC
64074+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64075+ const struct cred *cred = current_cred();
64076+
64077+ if (cred->uid && ((grsec_enable_tpe &&
64078+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64079+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
64080+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
64081+#else
64082+ in_group_p(grsec_tpe_gid)
64083+#endif
64084+ ) || gr_acl_tpe_check()) &&
64085+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
64086+ (inode->i_mode & S_IWOTH))))) {
64087+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64088+ return 0;
64089+ }
64090+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64091+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
64092+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
64093+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
64094+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64095+ return 0;
64096+ }
64097+#endif
64098+#endif
64099+ return 1;
64100+}
64101diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
64102new file mode 100644
64103index 0000000..9f7b1ac
64104--- /dev/null
64105+++ b/grsecurity/grsum.c
64106@@ -0,0 +1,61 @@
64107+#include <linux/err.h>
64108+#include <linux/kernel.h>
64109+#include <linux/sched.h>
64110+#include <linux/mm.h>
64111+#include <linux/scatterlist.h>
64112+#include <linux/crypto.h>
64113+#include <linux/gracl.h>
64114+
64115+
64116+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
64117+#error "crypto and sha256 must be built into the kernel"
64118+#endif
64119+
64120+int
64121+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
64122+{
64123+ char *p;
64124+ struct crypto_hash *tfm;
64125+ struct hash_desc desc;
64126+ struct scatterlist sg;
64127+ unsigned char temp_sum[GR_SHA_LEN];
64128+ volatile int retval = 0;
64129+ volatile int dummy = 0;
64130+ unsigned int i;
64131+
64132+ sg_init_table(&sg, 1);
64133+
64134+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
64135+ if (IS_ERR(tfm)) {
64136+ /* should never happen, since sha256 should be built in */
64137+ return 1;
64138+ }
64139+
64140+ desc.tfm = tfm;
64141+ desc.flags = 0;
64142+
64143+ crypto_hash_init(&desc);
64144+
64145+ p = salt;
64146+ sg_set_buf(&sg, p, GR_SALT_LEN);
64147+ crypto_hash_update(&desc, &sg, sg.length);
64148+
64149+ p = entry->pw;
64150+ sg_set_buf(&sg, p, strlen(p));
64151+
64152+ crypto_hash_update(&desc, &sg, sg.length);
64153+
64154+ crypto_hash_final(&desc, temp_sum);
64155+
64156+ memset(entry->pw, 0, GR_PW_LEN);
64157+
64158+ for (i = 0; i < GR_SHA_LEN; i++)
64159+ if (sum[i] != temp_sum[i])
64160+ retval = 1;
64161+ else
64162+ dummy = 1; // waste a cycle
64163+
64164+ crypto_free_hash(tfm);
64165+
64166+ return retval;
64167+}
64168diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
64169index 3cd9ccd..fe16d47 100644
64170--- a/include/acpi/acpi_bus.h
64171+++ b/include/acpi/acpi_bus.h
64172@@ -107,7 +107,7 @@ struct acpi_device_ops {
64173 acpi_op_bind bind;
64174 acpi_op_unbind unbind;
64175 acpi_op_notify notify;
64176-};
64177+} __no_const;
64178
64179 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
64180
64181diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
64182index f4906f6..71feb73 100644
64183--- a/include/acpi/acpi_drivers.h
64184+++ b/include/acpi/acpi_drivers.h
64185@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
64186 Dock Station
64187 -------------------------------------------------------------------------- */
64188 struct acpi_dock_ops {
64189- acpi_notify_handler handler;
64190- acpi_notify_handler uevent;
64191+ const acpi_notify_handler handler;
64192+ const acpi_notify_handler uevent;
64193 };
64194
64195 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
64196@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
64197 extern int register_dock_notifier(struct notifier_block *nb);
64198 extern void unregister_dock_notifier(struct notifier_block *nb);
64199 extern int register_hotplug_dock_device(acpi_handle handle,
64200- struct acpi_dock_ops *ops,
64201+ const struct acpi_dock_ops *ops,
64202 void *context);
64203 extern void unregister_hotplug_dock_device(acpi_handle handle);
64204 #else
64205@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
64206 {
64207 }
64208 static inline int register_hotplug_dock_device(acpi_handle handle,
64209- struct acpi_dock_ops *ops,
64210+ const struct acpi_dock_ops *ops,
64211 void *context)
64212 {
64213 return -ENODEV;
64214diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
64215index b7babf0..a9ac9fc 100644
64216--- a/include/asm-generic/atomic-long.h
64217+++ b/include/asm-generic/atomic-long.h
64218@@ -22,6 +22,12 @@
64219
64220 typedef atomic64_t atomic_long_t;
64221
64222+#ifdef CONFIG_PAX_REFCOUNT
64223+typedef atomic64_unchecked_t atomic_long_unchecked_t;
64224+#else
64225+typedef atomic64_t atomic_long_unchecked_t;
64226+#endif
64227+
64228 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
64229
64230 static inline long atomic_long_read(atomic_long_t *l)
64231@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64232 return (long)atomic64_read(v);
64233 }
64234
64235+#ifdef CONFIG_PAX_REFCOUNT
64236+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64237+{
64238+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64239+
64240+ return (long)atomic64_read_unchecked(v);
64241+}
64242+#endif
64243+
64244 static inline void atomic_long_set(atomic_long_t *l, long i)
64245 {
64246 atomic64_t *v = (atomic64_t *)l;
64247@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64248 atomic64_set(v, i);
64249 }
64250
64251+#ifdef CONFIG_PAX_REFCOUNT
64252+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64253+{
64254+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64255+
64256+ atomic64_set_unchecked(v, i);
64257+}
64258+#endif
64259+
64260 static inline void atomic_long_inc(atomic_long_t *l)
64261 {
64262 atomic64_t *v = (atomic64_t *)l;
64263@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64264 atomic64_inc(v);
64265 }
64266
64267+#ifdef CONFIG_PAX_REFCOUNT
64268+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64269+{
64270+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64271+
64272+ atomic64_inc_unchecked(v);
64273+}
64274+#endif
64275+
64276 static inline void atomic_long_dec(atomic_long_t *l)
64277 {
64278 atomic64_t *v = (atomic64_t *)l;
64279@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64280 atomic64_dec(v);
64281 }
64282
64283+#ifdef CONFIG_PAX_REFCOUNT
64284+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64285+{
64286+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64287+
64288+ atomic64_dec_unchecked(v);
64289+}
64290+#endif
64291+
64292 static inline void atomic_long_add(long i, atomic_long_t *l)
64293 {
64294 atomic64_t *v = (atomic64_t *)l;
64295@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64296 atomic64_add(i, v);
64297 }
64298
64299+#ifdef CONFIG_PAX_REFCOUNT
64300+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64301+{
64302+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64303+
64304+ atomic64_add_unchecked(i, v);
64305+}
64306+#endif
64307+
64308 static inline void atomic_long_sub(long i, atomic_long_t *l)
64309 {
64310 atomic64_t *v = (atomic64_t *)l;
64311@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64312 return (long)atomic64_inc_return(v);
64313 }
64314
64315+#ifdef CONFIG_PAX_REFCOUNT
64316+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64317+{
64318+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64319+
64320+ return (long)atomic64_inc_return_unchecked(v);
64321+}
64322+#endif
64323+
64324 static inline long atomic_long_dec_return(atomic_long_t *l)
64325 {
64326 atomic64_t *v = (atomic64_t *)l;
64327@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64328
64329 typedef atomic_t atomic_long_t;
64330
64331+#ifdef CONFIG_PAX_REFCOUNT
64332+typedef atomic_unchecked_t atomic_long_unchecked_t;
64333+#else
64334+typedef atomic_t atomic_long_unchecked_t;
64335+#endif
64336+
64337 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
64338 static inline long atomic_long_read(atomic_long_t *l)
64339 {
64340@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64341 return (long)atomic_read(v);
64342 }
64343
64344+#ifdef CONFIG_PAX_REFCOUNT
64345+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64346+{
64347+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64348+
64349+ return (long)atomic_read_unchecked(v);
64350+}
64351+#endif
64352+
64353 static inline void atomic_long_set(atomic_long_t *l, long i)
64354 {
64355 atomic_t *v = (atomic_t *)l;
64356@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64357 atomic_set(v, i);
64358 }
64359
64360+#ifdef CONFIG_PAX_REFCOUNT
64361+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64362+{
64363+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64364+
64365+ atomic_set_unchecked(v, i);
64366+}
64367+#endif
64368+
64369 static inline void atomic_long_inc(atomic_long_t *l)
64370 {
64371 atomic_t *v = (atomic_t *)l;
64372@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64373 atomic_inc(v);
64374 }
64375
64376+#ifdef CONFIG_PAX_REFCOUNT
64377+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64378+{
64379+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64380+
64381+ atomic_inc_unchecked(v);
64382+}
64383+#endif
64384+
64385 static inline void atomic_long_dec(atomic_long_t *l)
64386 {
64387 atomic_t *v = (atomic_t *)l;
64388@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64389 atomic_dec(v);
64390 }
64391
64392+#ifdef CONFIG_PAX_REFCOUNT
64393+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64394+{
64395+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64396+
64397+ atomic_dec_unchecked(v);
64398+}
64399+#endif
64400+
64401 static inline void atomic_long_add(long i, atomic_long_t *l)
64402 {
64403 atomic_t *v = (atomic_t *)l;
64404@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64405 atomic_add(i, v);
64406 }
64407
64408+#ifdef CONFIG_PAX_REFCOUNT
64409+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64410+{
64411+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64412+
64413+ atomic_add_unchecked(i, v);
64414+}
64415+#endif
64416+
64417 static inline void atomic_long_sub(long i, atomic_long_t *l)
64418 {
64419 atomic_t *v = (atomic_t *)l;
64420@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64421 return (long)atomic_inc_return(v);
64422 }
64423
64424+#ifdef CONFIG_PAX_REFCOUNT
64425+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64426+{
64427+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64428+
64429+ return (long)atomic_inc_return_unchecked(v);
64430+}
64431+#endif
64432+
64433 static inline long atomic_long_dec_return(atomic_long_t *l)
64434 {
64435 atomic_t *v = (atomic_t *)l;
64436@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64437
64438 #endif /* BITS_PER_LONG == 64 */
64439
64440+#ifdef CONFIG_PAX_REFCOUNT
64441+static inline void pax_refcount_needs_these_functions(void)
64442+{
64443+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
64444+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
64445+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
64446+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
64447+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
64448+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
64449+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
64450+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
64451+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
64452+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
64453+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
64454+
64455+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
64456+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
64457+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
64458+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
64459+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
64460+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
64461+}
64462+#else
64463+#define atomic_read_unchecked(v) atomic_read(v)
64464+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
64465+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
64466+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
64467+#define atomic_inc_unchecked(v) atomic_inc(v)
64468+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
64469+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
64470+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
64471+#define atomic_dec_unchecked(v) atomic_dec(v)
64472+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
64473+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
64474+
64475+#define atomic_long_read_unchecked(v) atomic_long_read(v)
64476+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
64477+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
64478+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
64479+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
64480+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
64481+#endif
64482+
64483 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
64484diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
64485index d48ddf0..656a0ac 100644
64486--- a/include/asm-generic/bug.h
64487+++ b/include/asm-generic/bug.h
64488@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
64489
64490 #else /* !CONFIG_BUG */
64491 #ifndef HAVE_ARCH_BUG
64492-#define BUG() do {} while(0)
64493+#define BUG() do { for (;;) ; } while(0)
64494 #endif
64495
64496 #ifndef HAVE_ARCH_BUG_ON
64497-#define BUG_ON(condition) do { if (condition) ; } while(0)
64498+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
64499 #endif
64500
64501 #ifndef HAVE_ARCH_WARN_ON
64502diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
64503index 1bfcfe5..e04c5c9 100644
64504--- a/include/asm-generic/cache.h
64505+++ b/include/asm-generic/cache.h
64506@@ -6,7 +6,7 @@
64507 * cache lines need to provide their own cache.h.
64508 */
64509
64510-#define L1_CACHE_SHIFT 5
64511-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
64512+#define L1_CACHE_SHIFT 5UL
64513+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
64514
64515 #endif /* __ASM_GENERIC_CACHE_H */
64516diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
64517index 6920695..41038bc 100644
64518--- a/include/asm-generic/dma-mapping-common.h
64519+++ b/include/asm-generic/dma-mapping-common.h
64520@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
64521 enum dma_data_direction dir,
64522 struct dma_attrs *attrs)
64523 {
64524- struct dma_map_ops *ops = get_dma_ops(dev);
64525+ const struct dma_map_ops *ops = get_dma_ops(dev);
64526 dma_addr_t addr;
64527
64528 kmemcheck_mark_initialized(ptr, size);
64529@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
64530 enum dma_data_direction dir,
64531 struct dma_attrs *attrs)
64532 {
64533- struct dma_map_ops *ops = get_dma_ops(dev);
64534+ const struct dma_map_ops *ops = get_dma_ops(dev);
64535
64536 BUG_ON(!valid_dma_direction(dir));
64537 if (ops->unmap_page)
64538@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
64539 int nents, enum dma_data_direction dir,
64540 struct dma_attrs *attrs)
64541 {
64542- struct dma_map_ops *ops = get_dma_ops(dev);
64543+ const struct dma_map_ops *ops = get_dma_ops(dev);
64544 int i, ents;
64545 struct scatterlist *s;
64546
64547@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
64548 int nents, enum dma_data_direction dir,
64549 struct dma_attrs *attrs)
64550 {
64551- struct dma_map_ops *ops = get_dma_ops(dev);
64552+ const struct dma_map_ops *ops = get_dma_ops(dev);
64553
64554 BUG_ON(!valid_dma_direction(dir));
64555 debug_dma_unmap_sg(dev, sg, nents, dir);
64556@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64557 size_t offset, size_t size,
64558 enum dma_data_direction dir)
64559 {
64560- struct dma_map_ops *ops = get_dma_ops(dev);
64561+ const struct dma_map_ops *ops = get_dma_ops(dev);
64562 dma_addr_t addr;
64563
64564 kmemcheck_mark_initialized(page_address(page) + offset, size);
64565@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64566 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
64567 size_t size, enum dma_data_direction dir)
64568 {
64569- struct dma_map_ops *ops = get_dma_ops(dev);
64570+ const struct dma_map_ops *ops = get_dma_ops(dev);
64571
64572 BUG_ON(!valid_dma_direction(dir));
64573 if (ops->unmap_page)
64574@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
64575 size_t size,
64576 enum dma_data_direction dir)
64577 {
64578- struct dma_map_ops *ops = get_dma_ops(dev);
64579+ const struct dma_map_ops *ops = get_dma_ops(dev);
64580
64581 BUG_ON(!valid_dma_direction(dir));
64582 if (ops->sync_single_for_cpu)
64583@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
64584 dma_addr_t addr, size_t size,
64585 enum dma_data_direction dir)
64586 {
64587- struct dma_map_ops *ops = get_dma_ops(dev);
64588+ const struct dma_map_ops *ops = get_dma_ops(dev);
64589
64590 BUG_ON(!valid_dma_direction(dir));
64591 if (ops->sync_single_for_device)
64592@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
64593 size_t size,
64594 enum dma_data_direction dir)
64595 {
64596- struct dma_map_ops *ops = get_dma_ops(dev);
64597+ const struct dma_map_ops *ops = get_dma_ops(dev);
64598
64599 BUG_ON(!valid_dma_direction(dir));
64600 if (ops->sync_single_range_for_cpu) {
64601@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
64602 size_t size,
64603 enum dma_data_direction dir)
64604 {
64605- struct dma_map_ops *ops = get_dma_ops(dev);
64606+ const struct dma_map_ops *ops = get_dma_ops(dev);
64607
64608 BUG_ON(!valid_dma_direction(dir));
64609 if (ops->sync_single_range_for_device) {
64610@@ -155,7 +155,7 @@ static inline void
64611 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
64612 int nelems, enum dma_data_direction dir)
64613 {
64614- struct dma_map_ops *ops = get_dma_ops(dev);
64615+ const struct dma_map_ops *ops = get_dma_ops(dev);
64616
64617 BUG_ON(!valid_dma_direction(dir));
64618 if (ops->sync_sg_for_cpu)
64619@@ -167,7 +167,7 @@ static inline void
64620 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
64621 int nelems, enum dma_data_direction dir)
64622 {
64623- struct dma_map_ops *ops = get_dma_ops(dev);
64624+ const struct dma_map_ops *ops = get_dma_ops(dev);
64625
64626 BUG_ON(!valid_dma_direction(dir));
64627 if (ops->sync_sg_for_device)
64628diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
64629index 0d68a1e..b74a761 100644
64630--- a/include/asm-generic/emergency-restart.h
64631+++ b/include/asm-generic/emergency-restart.h
64632@@ -1,7 +1,7 @@
64633 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
64634 #define _ASM_GENERIC_EMERGENCY_RESTART_H
64635
64636-static inline void machine_emergency_restart(void)
64637+static inline __noreturn void machine_emergency_restart(void)
64638 {
64639 machine_restart(NULL);
64640 }
64641diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
64642index 3c2344f..4590a7d 100644
64643--- a/include/asm-generic/futex.h
64644+++ b/include/asm-generic/futex.h
64645@@ -6,7 +6,7 @@
64646 #include <asm/errno.h>
64647
64648 static inline int
64649-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
64650+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
64651 {
64652 int op = (encoded_op >> 28) & 7;
64653 int cmp = (encoded_op >> 24) & 15;
64654@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
64655 }
64656
64657 static inline int
64658-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
64659+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
64660 {
64661 return -ENOSYS;
64662 }
64663diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
64664index 1ca3efc..e3dc852 100644
64665--- a/include/asm-generic/int-l64.h
64666+++ b/include/asm-generic/int-l64.h
64667@@ -46,6 +46,8 @@ typedef unsigned int u32;
64668 typedef signed long s64;
64669 typedef unsigned long u64;
64670
64671+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
64672+
64673 #define S8_C(x) x
64674 #define U8_C(x) x ## U
64675 #define S16_C(x) x
64676diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
64677index f394147..b6152b9 100644
64678--- a/include/asm-generic/int-ll64.h
64679+++ b/include/asm-generic/int-ll64.h
64680@@ -51,6 +51,8 @@ typedef unsigned int u32;
64681 typedef signed long long s64;
64682 typedef unsigned long long u64;
64683
64684+typedef unsigned long long intoverflow_t;
64685+
64686 #define S8_C(x) x
64687 #define U8_C(x) x ## U
64688 #define S16_C(x) x
64689diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
64690index e5f234a..cdb16b3 100644
64691--- a/include/asm-generic/kmap_types.h
64692+++ b/include/asm-generic/kmap_types.h
64693@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
64694 KMAP_D(16) KM_IRQ_PTE,
64695 KMAP_D(17) KM_NMI,
64696 KMAP_D(18) KM_NMI_PTE,
64697-KMAP_D(19) KM_TYPE_NR
64698+KMAP_D(19) KM_CLEARPAGE,
64699+KMAP_D(20) KM_TYPE_NR
64700 };
64701
64702 #undef KMAP_D
64703diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
64704index 725612b..9cc513a 100644
64705--- a/include/asm-generic/pgtable-nopmd.h
64706+++ b/include/asm-generic/pgtable-nopmd.h
64707@@ -1,14 +1,19 @@
64708 #ifndef _PGTABLE_NOPMD_H
64709 #define _PGTABLE_NOPMD_H
64710
64711-#ifndef __ASSEMBLY__
64712-
64713 #include <asm-generic/pgtable-nopud.h>
64714
64715-struct mm_struct;
64716-
64717 #define __PAGETABLE_PMD_FOLDED
64718
64719+#define PMD_SHIFT PUD_SHIFT
64720+#define PTRS_PER_PMD 1
64721+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
64722+#define PMD_MASK (~(PMD_SIZE-1))
64723+
64724+#ifndef __ASSEMBLY__
64725+
64726+struct mm_struct;
64727+
64728 /*
64729 * Having the pmd type consist of a pud gets the size right, and allows
64730 * us to conceptually access the pud entry that this pmd is folded into
64731@@ -16,11 +21,6 @@ struct mm_struct;
64732 */
64733 typedef struct { pud_t pud; } pmd_t;
64734
64735-#define PMD_SHIFT PUD_SHIFT
64736-#define PTRS_PER_PMD 1
64737-#define PMD_SIZE (1UL << PMD_SHIFT)
64738-#define PMD_MASK (~(PMD_SIZE-1))
64739-
64740 /*
64741 * The "pud_xxx()" functions here are trivial for a folded two-level
64742 * setup: the pmd is never bad, and a pmd always exists (as it's folded
64743diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
64744index 810431d..ccc3638 100644
64745--- a/include/asm-generic/pgtable-nopud.h
64746+++ b/include/asm-generic/pgtable-nopud.h
64747@@ -1,10 +1,15 @@
64748 #ifndef _PGTABLE_NOPUD_H
64749 #define _PGTABLE_NOPUD_H
64750
64751-#ifndef __ASSEMBLY__
64752-
64753 #define __PAGETABLE_PUD_FOLDED
64754
64755+#define PUD_SHIFT PGDIR_SHIFT
64756+#define PTRS_PER_PUD 1
64757+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
64758+#define PUD_MASK (~(PUD_SIZE-1))
64759+
64760+#ifndef __ASSEMBLY__
64761+
64762 /*
64763 * Having the pud type consist of a pgd gets the size right, and allows
64764 * us to conceptually access the pgd entry that this pud is folded into
64765@@ -12,11 +17,6 @@
64766 */
64767 typedef struct { pgd_t pgd; } pud_t;
64768
64769-#define PUD_SHIFT PGDIR_SHIFT
64770-#define PTRS_PER_PUD 1
64771-#define PUD_SIZE (1UL << PUD_SHIFT)
64772-#define PUD_MASK (~(PUD_SIZE-1))
64773-
64774 /*
64775 * The "pgd_xxx()" functions here are trivial for a folded two-level
64776 * setup: the pud is never bad, and a pud always exists (as it's folded
64777diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
64778index e2bd73e..fea8ed3 100644
64779--- a/include/asm-generic/pgtable.h
64780+++ b/include/asm-generic/pgtable.h
64781@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
64782 unsigned long size);
64783 #endif
64784
64785+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
64786+static inline unsigned long pax_open_kernel(void) { return 0; }
64787+#endif
64788+
64789+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
64790+static inline unsigned long pax_close_kernel(void) { return 0; }
64791+#endif
64792+
64793 #endif /* !__ASSEMBLY__ */
64794
64795 #endif /* _ASM_GENERIC_PGTABLE_H */
64796diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
64797index b6e818f..21aa58a 100644
64798--- a/include/asm-generic/vmlinux.lds.h
64799+++ b/include/asm-generic/vmlinux.lds.h
64800@@ -199,6 +199,7 @@
64801 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
64802 VMLINUX_SYMBOL(__start_rodata) = .; \
64803 *(.rodata) *(.rodata.*) \
64804+ *(.data.read_only) \
64805 *(__vermagic) /* Kernel version magic */ \
64806 *(__markers_strings) /* Markers: strings */ \
64807 *(__tracepoints_strings)/* Tracepoints: strings */ \
64808@@ -656,22 +657,24 @@
64809 * section in the linker script will go there too. @phdr should have
64810 * a leading colon.
64811 *
64812- * Note that this macros defines __per_cpu_load as an absolute symbol.
64813+ * Note that this macros defines per_cpu_load as an absolute symbol.
64814 * If there is no need to put the percpu section at a predetermined
64815 * address, use PERCPU().
64816 */
64817 #define PERCPU_VADDR(vaddr, phdr) \
64818- VMLINUX_SYMBOL(__per_cpu_load) = .; \
64819- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
64820+ per_cpu_load = .; \
64821+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
64822 - LOAD_OFFSET) { \
64823+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
64824 VMLINUX_SYMBOL(__per_cpu_start) = .; \
64825 *(.data.percpu.first) \
64826- *(.data.percpu.page_aligned) \
64827 *(.data.percpu) \
64828+ . = ALIGN(PAGE_SIZE); \
64829+ *(.data.percpu.page_aligned) \
64830 *(.data.percpu.shared_aligned) \
64831 VMLINUX_SYMBOL(__per_cpu_end) = .; \
64832 } phdr \
64833- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
64834+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
64835
64836 /**
64837 * PERCPU - define output section for percpu area, simple version
64838diff --git a/include/drm/drmP.h b/include/drm/drmP.h
64839index 66713c6..98c0460 100644
64840--- a/include/drm/drmP.h
64841+++ b/include/drm/drmP.h
64842@@ -71,6 +71,7 @@
64843 #include <linux/workqueue.h>
64844 #include <linux/poll.h>
64845 #include <asm/pgalloc.h>
64846+#include <asm/local.h>
64847 #include "drm.h"
64848
64849 #include <linux/idr.h>
64850@@ -814,7 +815,7 @@ struct drm_driver {
64851 void (*vgaarb_irq)(struct drm_device *dev, bool state);
64852
64853 /* Driver private ops for this object */
64854- struct vm_operations_struct *gem_vm_ops;
64855+ const struct vm_operations_struct *gem_vm_ops;
64856
64857 int major;
64858 int minor;
64859@@ -917,7 +918,7 @@ struct drm_device {
64860
64861 /** \name Usage Counters */
64862 /*@{ */
64863- int open_count; /**< Outstanding files open */
64864+ local_t open_count; /**< Outstanding files open */
64865 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
64866 atomic_t vma_count; /**< Outstanding vma areas open */
64867 int buf_use; /**< Buffers in use -- cannot alloc */
64868@@ -928,7 +929,7 @@ struct drm_device {
64869 /*@{ */
64870 unsigned long counters;
64871 enum drm_stat_type types[15];
64872- atomic_t counts[15];
64873+ atomic_unchecked_t counts[15];
64874 /*@} */
64875
64876 struct list_head filelist;
64877@@ -1016,7 +1017,7 @@ struct drm_device {
64878 struct pci_controller *hose;
64879 #endif
64880 struct drm_sg_mem *sg; /**< Scatter gather memory */
64881- unsigned int num_crtcs; /**< Number of CRTCs on this device */
64882+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
64883 void *dev_private; /**< device private data */
64884 void *mm_private;
64885 struct address_space *dev_mapping;
64886@@ -1042,11 +1043,11 @@ struct drm_device {
64887 spinlock_t object_name_lock;
64888 struct idr object_name_idr;
64889 atomic_t object_count;
64890- atomic_t object_memory;
64891+ atomic_unchecked_t object_memory;
64892 atomic_t pin_count;
64893- atomic_t pin_memory;
64894+ atomic_unchecked_t pin_memory;
64895 atomic_t gtt_count;
64896- atomic_t gtt_memory;
64897+ atomic_unchecked_t gtt_memory;
64898 uint32_t gtt_total;
64899 uint32_t invalidate_domains; /* domains pending invalidation */
64900 uint32_t flush_domains; /* domains pending flush */
64901diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
64902index b29e201..3413cc9 100644
64903--- a/include/drm/drm_crtc_helper.h
64904+++ b/include/drm/drm_crtc_helper.h
64905@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
64906
64907 /* reload the current crtc LUT */
64908 void (*load_lut)(struct drm_crtc *crtc);
64909-};
64910+} __no_const;
64911
64912 struct drm_encoder_helper_funcs {
64913 void (*dpms)(struct drm_encoder *encoder, int mode);
64914@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
64915 struct drm_connector *connector);
64916 /* disable encoder when not in use - more explicit than dpms off */
64917 void (*disable)(struct drm_encoder *encoder);
64918-};
64919+} __no_const;
64920
64921 struct drm_connector_helper_funcs {
64922 int (*get_modes)(struct drm_connector *connector);
64923diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
64924index b199170..6f9e64c 100644
64925--- a/include/drm/ttm/ttm_memory.h
64926+++ b/include/drm/ttm/ttm_memory.h
64927@@ -47,7 +47,7 @@
64928
64929 struct ttm_mem_shrink {
64930 int (*do_shrink) (struct ttm_mem_shrink *);
64931-};
64932+} __no_const;
64933
64934 /**
64935 * struct ttm_mem_global - Global memory accounting structure.
64936diff --git a/include/linux/a.out.h b/include/linux/a.out.h
64937index e86dfca..40cc55f 100644
64938--- a/include/linux/a.out.h
64939+++ b/include/linux/a.out.h
64940@@ -39,6 +39,14 @@ enum machine_type {
64941 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
64942 };
64943
64944+/* Constants for the N_FLAGS field */
64945+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
64946+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
64947+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
64948+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
64949+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
64950+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
64951+
64952 #if !defined (N_MAGIC)
64953 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
64954 #endif
64955diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
64956index 817b237..62c10bc 100644
64957--- a/include/linux/atmdev.h
64958+++ b/include/linux/atmdev.h
64959@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
64960 #endif
64961
64962 struct k_atm_aal_stats {
64963-#define __HANDLE_ITEM(i) atomic_t i
64964+#define __HANDLE_ITEM(i) atomic_unchecked_t i
64965 __AAL_STAT_ITEMS
64966 #undef __HANDLE_ITEM
64967 };
64968diff --git a/include/linux/backlight.h b/include/linux/backlight.h
64969index 0f5f578..8c4f884 100644
64970--- a/include/linux/backlight.h
64971+++ b/include/linux/backlight.h
64972@@ -36,18 +36,18 @@ struct backlight_device;
64973 struct fb_info;
64974
64975 struct backlight_ops {
64976- unsigned int options;
64977+ const unsigned int options;
64978
64979 #define BL_CORE_SUSPENDRESUME (1 << 0)
64980
64981 /* Notify the backlight driver some property has changed */
64982- int (*update_status)(struct backlight_device *);
64983+ int (* const update_status)(struct backlight_device *);
64984 /* Return the current backlight brightness (accounting for power,
64985 fb_blank etc.) */
64986- int (*get_brightness)(struct backlight_device *);
64987+ int (* const get_brightness)(struct backlight_device *);
64988 /* Check if given framebuffer device is the one bound to this backlight;
64989 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
64990- int (*check_fb)(struct fb_info *);
64991+ int (* const check_fb)(struct fb_info *);
64992 };
64993
64994 /* This structure defines all the properties of a backlight */
64995@@ -86,7 +86,7 @@ struct backlight_device {
64996 registered this device has been unloaded, and if class_get_devdata()
64997 points to something in the body of that driver, it is also invalid. */
64998 struct mutex ops_lock;
64999- struct backlight_ops *ops;
65000+ const struct backlight_ops *ops;
65001
65002 /* The framebuffer notifier block */
65003 struct notifier_block fb_notif;
65004@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65005 }
65006
65007 extern struct backlight_device *backlight_device_register(const char *name,
65008- struct device *dev, void *devdata, struct backlight_ops *ops);
65009+ struct device *dev, void *devdata, const struct backlight_ops *ops);
65010 extern void backlight_device_unregister(struct backlight_device *bd);
65011 extern void backlight_force_update(struct backlight_device *bd,
65012 enum backlight_update_reason reason);
65013diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65014index a3d802e..482f69c 100644
65015--- a/include/linux/binfmts.h
65016+++ b/include/linux/binfmts.h
65017@@ -83,6 +83,7 @@ struct linux_binfmt {
65018 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65019 int (*load_shlib)(struct file *);
65020 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65021+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65022 unsigned long min_coredump; /* minimal dump size */
65023 int hasvdso;
65024 };
65025diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65026index a06bfab..4fa38bb 100644
65027--- a/include/linux/blkdev.h
65028+++ b/include/linux/blkdev.h
65029@@ -1278,7 +1278,7 @@ struct block_device_operations {
65030 int (*revalidate_disk) (struct gendisk *);
65031 int (*getgeo)(struct block_device *, struct hd_geometry *);
65032 struct module *owner;
65033-};
65034+} __do_const;
65035
65036 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65037 unsigned long);
65038diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
65039index 3b73b99..629d21b 100644
65040--- a/include/linux/blktrace_api.h
65041+++ b/include/linux/blktrace_api.h
65042@@ -160,7 +160,7 @@ struct blk_trace {
65043 struct dentry *dir;
65044 struct dentry *dropped_file;
65045 struct dentry *msg_file;
65046- atomic_t dropped;
65047+ atomic_unchecked_t dropped;
65048 };
65049
65050 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
65051diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
65052index 83195fb..0b0f77d 100644
65053--- a/include/linux/byteorder/little_endian.h
65054+++ b/include/linux/byteorder/little_endian.h
65055@@ -42,51 +42,51 @@
65056
65057 static inline __le64 __cpu_to_le64p(const __u64 *p)
65058 {
65059- return (__force __le64)*p;
65060+ return (__force const __le64)*p;
65061 }
65062 static inline __u64 __le64_to_cpup(const __le64 *p)
65063 {
65064- return (__force __u64)*p;
65065+ return (__force const __u64)*p;
65066 }
65067 static inline __le32 __cpu_to_le32p(const __u32 *p)
65068 {
65069- return (__force __le32)*p;
65070+ return (__force const __le32)*p;
65071 }
65072 static inline __u32 __le32_to_cpup(const __le32 *p)
65073 {
65074- return (__force __u32)*p;
65075+ return (__force const __u32)*p;
65076 }
65077 static inline __le16 __cpu_to_le16p(const __u16 *p)
65078 {
65079- return (__force __le16)*p;
65080+ return (__force const __le16)*p;
65081 }
65082 static inline __u16 __le16_to_cpup(const __le16 *p)
65083 {
65084- return (__force __u16)*p;
65085+ return (__force const __u16)*p;
65086 }
65087 static inline __be64 __cpu_to_be64p(const __u64 *p)
65088 {
65089- return (__force __be64)__swab64p(p);
65090+ return (__force const __be64)__swab64p(p);
65091 }
65092 static inline __u64 __be64_to_cpup(const __be64 *p)
65093 {
65094- return __swab64p((__u64 *)p);
65095+ return __swab64p((const __u64 *)p);
65096 }
65097 static inline __be32 __cpu_to_be32p(const __u32 *p)
65098 {
65099- return (__force __be32)__swab32p(p);
65100+ return (__force const __be32)__swab32p(p);
65101 }
65102 static inline __u32 __be32_to_cpup(const __be32 *p)
65103 {
65104- return __swab32p((__u32 *)p);
65105+ return __swab32p((const __u32 *)p);
65106 }
65107 static inline __be16 __cpu_to_be16p(const __u16 *p)
65108 {
65109- return (__force __be16)__swab16p(p);
65110+ return (__force const __be16)__swab16p(p);
65111 }
65112 static inline __u16 __be16_to_cpup(const __be16 *p)
65113 {
65114- return __swab16p((__u16 *)p);
65115+ return __swab16p((const __u16 *)p);
65116 }
65117 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
65118 #define __le64_to_cpus(x) do { (void)(x); } while (0)
65119diff --git a/include/linux/cache.h b/include/linux/cache.h
65120index 97e2488..e7576b9 100644
65121--- a/include/linux/cache.h
65122+++ b/include/linux/cache.h
65123@@ -16,6 +16,10 @@
65124 #define __read_mostly
65125 #endif
65126
65127+#ifndef __read_only
65128+#define __read_only __read_mostly
65129+#endif
65130+
65131 #ifndef ____cacheline_aligned
65132 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
65133 #endif
65134diff --git a/include/linux/capability.h b/include/linux/capability.h
65135index c8f2a5f7..1618a5c 100644
65136--- a/include/linux/capability.h
65137+++ b/include/linux/capability.h
65138@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
65139 (security_real_capable_noaudit((t), (cap)) == 0)
65140
65141 extern int capable(int cap);
65142+int capable_nolog(int cap);
65143
65144 /* audit system wants to get cap info from files as well */
65145 struct dentry;
65146diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
65147index 450fa59..86019fb 100644
65148--- a/include/linux/compiler-gcc4.h
65149+++ b/include/linux/compiler-gcc4.h
65150@@ -36,4 +36,16 @@
65151 the kernel context */
65152 #define __cold __attribute__((__cold__))
65153
65154+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
65155+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
65156+#define __bos0(ptr) __bos((ptr), 0)
65157+#define __bos1(ptr) __bos((ptr), 1)
65158+
65159+#if __GNUC_MINOR__ >= 5
65160+#ifdef CONSTIFY_PLUGIN
65161+#define __no_const __attribute__((no_const))
65162+#define __do_const __attribute__((do_const))
65163+#endif
65164+#endif
65165+
65166 #endif
65167diff --git a/include/linux/compiler.h b/include/linux/compiler.h
65168index 04fb513..fd6477b 100644
65169--- a/include/linux/compiler.h
65170+++ b/include/linux/compiler.h
65171@@ -5,11 +5,14 @@
65172
65173 #ifdef __CHECKER__
65174 # define __user __attribute__((noderef, address_space(1)))
65175+# define __force_user __force __user
65176 # define __kernel /* default address space */
65177+# define __force_kernel __force __kernel
65178 # define __safe __attribute__((safe))
65179 # define __force __attribute__((force))
65180 # define __nocast __attribute__((nocast))
65181 # define __iomem __attribute__((noderef, address_space(2)))
65182+# define __force_iomem __force __iomem
65183 # define __acquires(x) __attribute__((context(x,0,1)))
65184 # define __releases(x) __attribute__((context(x,1,0)))
65185 # define __acquire(x) __context__(x,1)
65186@@ -17,13 +20,34 @@
65187 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
65188 extern void __chk_user_ptr(const volatile void __user *);
65189 extern void __chk_io_ptr(const volatile void __iomem *);
65190+#elif defined(CHECKER_PLUGIN)
65191+//# define __user
65192+//# define __force_user
65193+//# define __kernel
65194+//# define __force_kernel
65195+# define __safe
65196+# define __force
65197+# define __nocast
65198+# define __iomem
65199+# define __force_iomem
65200+# define __chk_user_ptr(x) (void)0
65201+# define __chk_io_ptr(x) (void)0
65202+# define __builtin_warning(x, y...) (1)
65203+# define __acquires(x)
65204+# define __releases(x)
65205+# define __acquire(x) (void)0
65206+# define __release(x) (void)0
65207+# define __cond_lock(x,c) (c)
65208 #else
65209 # define __user
65210+# define __force_user
65211 # define __kernel
65212+# define __force_kernel
65213 # define __safe
65214 # define __force
65215 # define __nocast
65216 # define __iomem
65217+# define __force_iomem
65218 # define __chk_user_ptr(x) (void)0
65219 # define __chk_io_ptr(x) (void)0
65220 # define __builtin_warning(x, y...) (1)
65221@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65222 # define __attribute_const__ /* unimplemented */
65223 #endif
65224
65225+#ifndef __no_const
65226+# define __no_const
65227+#endif
65228+
65229+#ifndef __do_const
65230+# define __do_const
65231+#endif
65232+
65233 /*
65234 * Tell gcc if a function is cold. The compiler will assume any path
65235 * directly leading to the call is unlikely.
65236@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65237 #define __cold
65238 #endif
65239
65240+#ifndef __alloc_size
65241+#define __alloc_size(...)
65242+#endif
65243+
65244+#ifndef __bos
65245+#define __bos(ptr, arg)
65246+#endif
65247+
65248+#ifndef __bos0
65249+#define __bos0(ptr)
65250+#endif
65251+
65252+#ifndef __bos1
65253+#define __bos1(ptr)
65254+#endif
65255+
65256 /* Simple shorthand for a section definition */
65257 #ifndef __section
65258 # define __section(S) __attribute__ ((__section__(#S)))
65259@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65260 * use is to mediate communication between process-level code and irq/NMI
65261 * handlers, all running on the same CPU.
65262 */
65263-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
65264+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
65265+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
65266
65267 #endif /* __LINUX_COMPILER_H */
65268diff --git a/include/linux/crypto.h b/include/linux/crypto.h
65269index fd92988..a3164bd 100644
65270--- a/include/linux/crypto.h
65271+++ b/include/linux/crypto.h
65272@@ -394,7 +394,7 @@ struct cipher_tfm {
65273 const u8 *key, unsigned int keylen);
65274 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65275 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65276-};
65277+} __no_const;
65278
65279 struct hash_tfm {
65280 int (*init)(struct hash_desc *desc);
65281@@ -415,13 +415,13 @@ struct compress_tfm {
65282 int (*cot_decompress)(struct crypto_tfm *tfm,
65283 const u8 *src, unsigned int slen,
65284 u8 *dst, unsigned int *dlen);
65285-};
65286+} __no_const;
65287
65288 struct rng_tfm {
65289 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
65290 unsigned int dlen);
65291 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
65292-};
65293+} __no_const;
65294
65295 #define crt_ablkcipher crt_u.ablkcipher
65296 #define crt_aead crt_u.aead
65297diff --git a/include/linux/dcache.h b/include/linux/dcache.h
65298index 30b93b2..cd7a8db 100644
65299--- a/include/linux/dcache.h
65300+++ b/include/linux/dcache.h
65301@@ -119,6 +119,8 @@ struct dentry {
65302 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
65303 };
65304
65305+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
65306+
65307 /*
65308 * dentry->d_lock spinlock nesting subclasses:
65309 *
65310diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
65311index 3e9bd6a..f4e1aa0 100644
65312--- a/include/linux/decompress/mm.h
65313+++ b/include/linux/decompress/mm.h
65314@@ -78,7 +78,7 @@ static void free(void *where)
65315 * warnings when not needed (indeed large_malloc / large_free are not
65316 * needed by inflate */
65317
65318-#define malloc(a) kmalloc(a, GFP_KERNEL)
65319+#define malloc(a) kmalloc((a), GFP_KERNEL)
65320 #define free(a) kfree(a)
65321
65322 #define large_malloc(a) vmalloc(a)
65323diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
65324index 91b7618..92a93d32 100644
65325--- a/include/linux/dma-mapping.h
65326+++ b/include/linux/dma-mapping.h
65327@@ -16,51 +16,51 @@ enum dma_data_direction {
65328 };
65329
65330 struct dma_map_ops {
65331- void* (*alloc_coherent)(struct device *dev, size_t size,
65332+ void* (* const alloc_coherent)(struct device *dev, size_t size,
65333 dma_addr_t *dma_handle, gfp_t gfp);
65334- void (*free_coherent)(struct device *dev, size_t size,
65335+ void (* const free_coherent)(struct device *dev, size_t size,
65336 void *vaddr, dma_addr_t dma_handle);
65337- dma_addr_t (*map_page)(struct device *dev, struct page *page,
65338+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
65339 unsigned long offset, size_t size,
65340 enum dma_data_direction dir,
65341 struct dma_attrs *attrs);
65342- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
65343+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
65344 size_t size, enum dma_data_direction dir,
65345 struct dma_attrs *attrs);
65346- int (*map_sg)(struct device *dev, struct scatterlist *sg,
65347+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
65348 int nents, enum dma_data_direction dir,
65349 struct dma_attrs *attrs);
65350- void (*unmap_sg)(struct device *dev,
65351+ void (* const unmap_sg)(struct device *dev,
65352 struct scatterlist *sg, int nents,
65353 enum dma_data_direction dir,
65354 struct dma_attrs *attrs);
65355- void (*sync_single_for_cpu)(struct device *dev,
65356+ void (* const sync_single_for_cpu)(struct device *dev,
65357 dma_addr_t dma_handle, size_t size,
65358 enum dma_data_direction dir);
65359- void (*sync_single_for_device)(struct device *dev,
65360+ void (* const sync_single_for_device)(struct device *dev,
65361 dma_addr_t dma_handle, size_t size,
65362 enum dma_data_direction dir);
65363- void (*sync_single_range_for_cpu)(struct device *dev,
65364+ void (* const sync_single_range_for_cpu)(struct device *dev,
65365 dma_addr_t dma_handle,
65366 unsigned long offset,
65367 size_t size,
65368 enum dma_data_direction dir);
65369- void (*sync_single_range_for_device)(struct device *dev,
65370+ void (* const sync_single_range_for_device)(struct device *dev,
65371 dma_addr_t dma_handle,
65372 unsigned long offset,
65373 size_t size,
65374 enum dma_data_direction dir);
65375- void (*sync_sg_for_cpu)(struct device *dev,
65376+ void (* const sync_sg_for_cpu)(struct device *dev,
65377 struct scatterlist *sg, int nents,
65378 enum dma_data_direction dir);
65379- void (*sync_sg_for_device)(struct device *dev,
65380+ void (* const sync_sg_for_device)(struct device *dev,
65381 struct scatterlist *sg, int nents,
65382 enum dma_data_direction dir);
65383- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
65384- int (*dma_supported)(struct device *dev, u64 mask);
65385+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
65386+ int (* const dma_supported)(struct device *dev, u64 mask);
65387 int (*set_dma_mask)(struct device *dev, u64 mask);
65388 int is_phys;
65389-};
65390+} __do_const;
65391
65392 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
65393
65394diff --git a/include/linux/dst.h b/include/linux/dst.h
65395index e26fed8..b976d9f 100644
65396--- a/include/linux/dst.h
65397+++ b/include/linux/dst.h
65398@@ -380,7 +380,7 @@ struct dst_node
65399 struct thread_pool *pool;
65400
65401 /* Transaction IDs live here */
65402- atomic_long_t gen;
65403+ atomic_long_unchecked_t gen;
65404
65405 /*
65406 * How frequently and how many times transaction
65407diff --git a/include/linux/elf.h b/include/linux/elf.h
65408index 90a4ed0..d652617 100644
65409--- a/include/linux/elf.h
65410+++ b/include/linux/elf.h
65411@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
65412 #define PT_GNU_EH_FRAME 0x6474e550
65413
65414 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
65415+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
65416+
65417+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
65418+
65419+/* Constants for the e_flags field */
65420+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65421+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
65422+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
65423+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
65424+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65425+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65426
65427 /* These constants define the different elf file types */
65428 #define ET_NONE 0
65429@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
65430 #define DT_DEBUG 21
65431 #define DT_TEXTREL 22
65432 #define DT_JMPREL 23
65433+#define DT_FLAGS 30
65434+ #define DF_TEXTREL 0x00000004
65435 #define DT_ENCODING 32
65436 #define OLD_DT_LOOS 0x60000000
65437 #define DT_LOOS 0x6000000d
65438@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
65439 #define PF_W 0x2
65440 #define PF_X 0x1
65441
65442+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
65443+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
65444+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
65445+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
65446+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
65447+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
65448+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
65449+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
65450+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
65451+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
65452+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
65453+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
65454+
65455 typedef struct elf32_phdr{
65456 Elf32_Word p_type;
65457 Elf32_Off p_offset;
65458@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
65459 #define EI_OSABI 7
65460 #define EI_PAD 8
65461
65462+#define EI_PAX 14
65463+
65464 #define ELFMAG0 0x7f /* EI_MAG */
65465 #define ELFMAG1 'E'
65466 #define ELFMAG2 'L'
65467@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
65468 #define elf_phdr elf32_phdr
65469 #define elf_note elf32_note
65470 #define elf_addr_t Elf32_Off
65471+#define elf_dyn Elf32_Dyn
65472
65473 #else
65474
65475@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
65476 #define elf_phdr elf64_phdr
65477 #define elf_note elf64_note
65478 #define elf_addr_t Elf64_Off
65479+#define elf_dyn Elf64_Dyn
65480
65481 #endif
65482
65483diff --git a/include/linux/fs.h b/include/linux/fs.h
65484index 1b9a47a..6fe2934 100644
65485--- a/include/linux/fs.h
65486+++ b/include/linux/fs.h
65487@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
65488 unsigned long, unsigned long);
65489
65490 struct address_space_operations {
65491- int (*writepage)(struct page *page, struct writeback_control *wbc);
65492- int (*readpage)(struct file *, struct page *);
65493- void (*sync_page)(struct page *);
65494+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
65495+ int (* const readpage)(struct file *, struct page *);
65496+ void (* const sync_page)(struct page *);
65497
65498 /* Write back some dirty pages from this mapping. */
65499- int (*writepages)(struct address_space *, struct writeback_control *);
65500+ int (* const writepages)(struct address_space *, struct writeback_control *);
65501
65502 /* Set a page dirty. Return true if this dirtied it */
65503- int (*set_page_dirty)(struct page *page);
65504+ int (* const set_page_dirty)(struct page *page);
65505
65506- int (*readpages)(struct file *filp, struct address_space *mapping,
65507+ int (* const readpages)(struct file *filp, struct address_space *mapping,
65508 struct list_head *pages, unsigned nr_pages);
65509
65510- int (*write_begin)(struct file *, struct address_space *mapping,
65511+ int (* const write_begin)(struct file *, struct address_space *mapping,
65512 loff_t pos, unsigned len, unsigned flags,
65513 struct page **pagep, void **fsdata);
65514- int (*write_end)(struct file *, struct address_space *mapping,
65515+ int (* const write_end)(struct file *, struct address_space *mapping,
65516 loff_t pos, unsigned len, unsigned copied,
65517 struct page *page, void *fsdata);
65518
65519 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
65520- sector_t (*bmap)(struct address_space *, sector_t);
65521- void (*invalidatepage) (struct page *, unsigned long);
65522- int (*releasepage) (struct page *, gfp_t);
65523- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
65524+ sector_t (* const bmap)(struct address_space *, sector_t);
65525+ void (* const invalidatepage) (struct page *, unsigned long);
65526+ int (* const releasepage) (struct page *, gfp_t);
65527+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
65528 loff_t offset, unsigned long nr_segs);
65529- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
65530+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
65531 void **, unsigned long *);
65532 /* migrate the contents of a page to the specified target */
65533- int (*migratepage) (struct address_space *,
65534+ int (* const migratepage) (struct address_space *,
65535 struct page *, struct page *);
65536- int (*launder_page) (struct page *);
65537- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
65538+ int (* const launder_page) (struct page *);
65539+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
65540 unsigned long);
65541- int (*error_remove_page)(struct address_space *, struct page *);
65542+ int (* const error_remove_page)(struct address_space *, struct page *);
65543 };
65544
65545 /*
65546@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
65547 typedef struct files_struct *fl_owner_t;
65548
65549 struct file_lock_operations {
65550- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
65551- void (*fl_release_private)(struct file_lock *);
65552+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
65553+ void (* const fl_release_private)(struct file_lock *);
65554 };
65555
65556 struct lock_manager_operations {
65557- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
65558- void (*fl_notify)(struct file_lock *); /* unblock callback */
65559- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
65560- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
65561- void (*fl_release_private)(struct file_lock *);
65562- void (*fl_break)(struct file_lock *);
65563- int (*fl_mylease)(struct file_lock *, struct file_lock *);
65564- int (*fl_change)(struct file_lock **, int);
65565+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
65566+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
65567+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
65568+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
65569+ void (* const fl_release_private)(struct file_lock *);
65570+ void (* const fl_break)(struct file_lock *);
65571+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
65572+ int (* const fl_change)(struct file_lock **, int);
65573 };
65574
65575 struct lock_manager {
65576@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
65577 unsigned int fi_flags; /* Flags as passed from user */
65578 unsigned int fi_extents_mapped; /* Number of mapped extents */
65579 unsigned int fi_extents_max; /* Size of fiemap_extent array */
65580- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
65581+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
65582 * array */
65583 };
65584 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
65585@@ -1512,7 +1512,8 @@ struct file_operations {
65586 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
65587 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
65588 int (*setlease)(struct file *, long, struct file_lock **);
65589-};
65590+} __do_const;
65591+typedef struct file_operations __no_const file_operations_no_const;
65592
65593 struct inode_operations {
65594 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
65595@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
65596 unsigned long, loff_t *);
65597
65598 struct super_operations {
65599- struct inode *(*alloc_inode)(struct super_block *sb);
65600- void (*destroy_inode)(struct inode *);
65601-
65602- void (*dirty_inode) (struct inode *);
65603- int (*write_inode) (struct inode *, int);
65604- void (*drop_inode) (struct inode *);
65605- void (*delete_inode) (struct inode *);
65606- void (*put_super) (struct super_block *);
65607- void (*write_super) (struct super_block *);
65608- int (*sync_fs)(struct super_block *sb, int wait);
65609- int (*freeze_fs) (struct super_block *);
65610- int (*unfreeze_fs) (struct super_block *);
65611- int (*statfs) (struct dentry *, struct kstatfs *);
65612- int (*remount_fs) (struct super_block *, int *, char *);
65613- void (*clear_inode) (struct inode *);
65614- void (*umount_begin) (struct super_block *);
65615-
65616- int (*show_options)(struct seq_file *, struct vfsmount *);
65617- int (*show_stats)(struct seq_file *, struct vfsmount *);
65618+ struct inode *(* const alloc_inode)(struct super_block *sb);
65619+ void (* const destroy_inode)(struct inode *);
65620+
65621+ void (* const dirty_inode) (struct inode *);
65622+ int (* const write_inode) (struct inode *, int);
65623+ void (* const drop_inode) (struct inode *);
65624+ void (* const delete_inode) (struct inode *);
65625+ void (* const put_super) (struct super_block *);
65626+ void (* const write_super) (struct super_block *);
65627+ int (* const sync_fs)(struct super_block *sb, int wait);
65628+ int (* const freeze_fs) (struct super_block *);
65629+ int (* const unfreeze_fs) (struct super_block *);
65630+ int (* const statfs) (struct dentry *, struct kstatfs *);
65631+ int (* const remount_fs) (struct super_block *, int *, char *);
65632+ void (* const clear_inode) (struct inode *);
65633+ void (* const umount_begin) (struct super_block *);
65634+
65635+ int (* const show_options)(struct seq_file *, struct vfsmount *);
65636+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
65637 #ifdef CONFIG_QUOTA
65638- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
65639- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
65640+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
65641+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
65642 #endif
65643- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
65644+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
65645 };
65646
65647 /*
65648diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
65649index 78a05bf..2a7d3e1 100644
65650--- a/include/linux/fs_struct.h
65651+++ b/include/linux/fs_struct.h
65652@@ -4,7 +4,7 @@
65653 #include <linux/path.h>
65654
65655 struct fs_struct {
65656- int users;
65657+ atomic_t users;
65658 rwlock_t lock;
65659 int umask;
65660 int in_exec;
65661diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
65662index 7be0c6f..2f63a2b 100644
65663--- a/include/linux/fscache-cache.h
65664+++ b/include/linux/fscache-cache.h
65665@@ -116,7 +116,7 @@ struct fscache_operation {
65666 #endif
65667 };
65668
65669-extern atomic_t fscache_op_debug_id;
65670+extern atomic_unchecked_t fscache_op_debug_id;
65671 extern const struct slow_work_ops fscache_op_slow_work_ops;
65672
65673 extern void fscache_enqueue_operation(struct fscache_operation *);
65674@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
65675 fscache_operation_release_t release)
65676 {
65677 atomic_set(&op->usage, 1);
65678- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
65679+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
65680 op->release = release;
65681 INIT_LIST_HEAD(&op->pend_link);
65682 fscache_set_op_state(op, "Init");
65683diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
65684index 4ec5e67..42f1eb9 100644
65685--- a/include/linux/ftrace_event.h
65686+++ b/include/linux/ftrace_event.h
65687@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
65688 int filter_type);
65689 extern int trace_define_common_fields(struct ftrace_event_call *call);
65690
65691-#define is_signed_type(type) (((type)(-1)) < 0)
65692+#define is_signed_type(type) (((type)(-1)) < (type)1)
65693
65694 int trace_set_clr_event(const char *system, const char *event, int set);
65695
65696diff --git a/include/linux/genhd.h b/include/linux/genhd.h
65697index 297df45..b6a74ff 100644
65698--- a/include/linux/genhd.h
65699+++ b/include/linux/genhd.h
65700@@ -161,7 +161,7 @@ struct gendisk {
65701
65702 struct timer_rand_state *random;
65703
65704- atomic_t sync_io; /* RAID */
65705+ atomic_unchecked_t sync_io; /* RAID */
65706 struct work_struct async_notify;
65707 #ifdef CONFIG_BLK_DEV_INTEGRITY
65708 struct blk_integrity *integrity;
65709diff --git a/include/linux/gracl.h b/include/linux/gracl.h
65710new file mode 100644
65711index 0000000..0dc3943
65712--- /dev/null
65713+++ b/include/linux/gracl.h
65714@@ -0,0 +1,317 @@
65715+#ifndef GR_ACL_H
65716+#define GR_ACL_H
65717+
65718+#include <linux/grdefs.h>
65719+#include <linux/resource.h>
65720+#include <linux/capability.h>
65721+#include <linux/dcache.h>
65722+#include <asm/resource.h>
65723+
65724+/* Major status information */
65725+
65726+#define GR_VERSION "grsecurity 2.2.2"
65727+#define GRSECURITY_VERSION 0x2202
65728+
65729+enum {
65730+ GR_SHUTDOWN = 0,
65731+ GR_ENABLE = 1,
65732+ GR_SPROLE = 2,
65733+ GR_RELOAD = 3,
65734+ GR_SEGVMOD = 4,
65735+ GR_STATUS = 5,
65736+ GR_UNSPROLE = 6,
65737+ GR_PASSSET = 7,
65738+ GR_SPROLEPAM = 8,
65739+};
65740+
65741+/* Password setup definitions
65742+ * kernel/grhash.c */
65743+enum {
65744+ GR_PW_LEN = 128,
65745+ GR_SALT_LEN = 16,
65746+ GR_SHA_LEN = 32,
65747+};
65748+
65749+enum {
65750+ GR_SPROLE_LEN = 64,
65751+};
65752+
65753+enum {
65754+ GR_NO_GLOB = 0,
65755+ GR_REG_GLOB,
65756+ GR_CREATE_GLOB
65757+};
65758+
65759+#define GR_NLIMITS 32
65760+
65761+/* Begin Data Structures */
65762+
65763+struct sprole_pw {
65764+ unsigned char *rolename;
65765+ unsigned char salt[GR_SALT_LEN];
65766+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
65767+};
65768+
65769+struct name_entry {
65770+ __u32 key;
65771+ ino_t inode;
65772+ dev_t device;
65773+ char *name;
65774+ __u16 len;
65775+ __u8 deleted;
65776+ struct name_entry *prev;
65777+ struct name_entry *next;
65778+};
65779+
65780+struct inodev_entry {
65781+ struct name_entry *nentry;
65782+ struct inodev_entry *prev;
65783+ struct inodev_entry *next;
65784+};
65785+
65786+struct acl_role_db {
65787+ struct acl_role_label **r_hash;
65788+ __u32 r_size;
65789+};
65790+
65791+struct inodev_db {
65792+ struct inodev_entry **i_hash;
65793+ __u32 i_size;
65794+};
65795+
65796+struct name_db {
65797+ struct name_entry **n_hash;
65798+ __u32 n_size;
65799+};
65800+
65801+struct crash_uid {
65802+ uid_t uid;
65803+ unsigned long expires;
65804+};
65805+
65806+struct gr_hash_struct {
65807+ void **table;
65808+ void **nametable;
65809+ void *first;
65810+ __u32 table_size;
65811+ __u32 used_size;
65812+ int type;
65813+};
65814+
65815+/* Userspace Grsecurity ACL data structures */
65816+
65817+struct acl_subject_label {
65818+ char *filename;
65819+ ino_t inode;
65820+ dev_t device;
65821+ __u32 mode;
65822+ kernel_cap_t cap_mask;
65823+ kernel_cap_t cap_lower;
65824+ kernel_cap_t cap_invert_audit;
65825+
65826+ struct rlimit res[GR_NLIMITS];
65827+ __u32 resmask;
65828+
65829+ __u8 user_trans_type;
65830+ __u8 group_trans_type;
65831+ uid_t *user_transitions;
65832+ gid_t *group_transitions;
65833+ __u16 user_trans_num;
65834+ __u16 group_trans_num;
65835+
65836+ __u32 sock_families[2];
65837+ __u32 ip_proto[8];
65838+ __u32 ip_type;
65839+ struct acl_ip_label **ips;
65840+ __u32 ip_num;
65841+ __u32 inaddr_any_override;
65842+
65843+ __u32 crashes;
65844+ unsigned long expires;
65845+
65846+ struct acl_subject_label *parent_subject;
65847+ struct gr_hash_struct *hash;
65848+ struct acl_subject_label *prev;
65849+ struct acl_subject_label *next;
65850+
65851+ struct acl_object_label **obj_hash;
65852+ __u32 obj_hash_size;
65853+ __u16 pax_flags;
65854+};
65855+
65856+struct role_allowed_ip {
65857+ __u32 addr;
65858+ __u32 netmask;
65859+
65860+ struct role_allowed_ip *prev;
65861+ struct role_allowed_ip *next;
65862+};
65863+
65864+struct role_transition {
65865+ char *rolename;
65866+
65867+ struct role_transition *prev;
65868+ struct role_transition *next;
65869+};
65870+
65871+struct acl_role_label {
65872+ char *rolename;
65873+ uid_t uidgid;
65874+ __u16 roletype;
65875+
65876+ __u16 auth_attempts;
65877+ unsigned long expires;
65878+
65879+ struct acl_subject_label *root_label;
65880+ struct gr_hash_struct *hash;
65881+
65882+ struct acl_role_label *prev;
65883+ struct acl_role_label *next;
65884+
65885+ struct role_transition *transitions;
65886+ struct role_allowed_ip *allowed_ips;
65887+ uid_t *domain_children;
65888+ __u16 domain_child_num;
65889+
65890+ struct acl_subject_label **subj_hash;
65891+ __u32 subj_hash_size;
65892+};
65893+
65894+struct user_acl_role_db {
65895+ struct acl_role_label **r_table;
65896+ __u32 num_pointers; /* Number of allocations to track */
65897+ __u32 num_roles; /* Number of roles */
65898+ __u32 num_domain_children; /* Number of domain children */
65899+ __u32 num_subjects; /* Number of subjects */
65900+ __u32 num_objects; /* Number of objects */
65901+};
65902+
65903+struct acl_object_label {
65904+ char *filename;
65905+ ino_t inode;
65906+ dev_t device;
65907+ __u32 mode;
65908+
65909+ struct acl_subject_label *nested;
65910+ struct acl_object_label *globbed;
65911+
65912+ /* next two structures not used */
65913+
65914+ struct acl_object_label *prev;
65915+ struct acl_object_label *next;
65916+};
65917+
65918+struct acl_ip_label {
65919+ char *iface;
65920+ __u32 addr;
65921+ __u32 netmask;
65922+ __u16 low, high;
65923+ __u8 mode;
65924+ __u32 type;
65925+ __u32 proto[8];
65926+
65927+ /* next two structures not used */
65928+
65929+ struct acl_ip_label *prev;
65930+ struct acl_ip_label *next;
65931+};
65932+
65933+struct gr_arg {
65934+ struct user_acl_role_db role_db;
65935+ unsigned char pw[GR_PW_LEN];
65936+ unsigned char salt[GR_SALT_LEN];
65937+ unsigned char sum[GR_SHA_LEN];
65938+ unsigned char sp_role[GR_SPROLE_LEN];
65939+ struct sprole_pw *sprole_pws;
65940+ dev_t segv_device;
65941+ ino_t segv_inode;
65942+ uid_t segv_uid;
65943+ __u16 num_sprole_pws;
65944+ __u16 mode;
65945+};
65946+
65947+struct gr_arg_wrapper {
65948+ struct gr_arg *arg;
65949+ __u32 version;
65950+ __u32 size;
65951+};
65952+
65953+struct subject_map {
65954+ struct acl_subject_label *user;
65955+ struct acl_subject_label *kernel;
65956+ struct subject_map *prev;
65957+ struct subject_map *next;
65958+};
65959+
65960+struct acl_subj_map_db {
65961+ struct subject_map **s_hash;
65962+ __u32 s_size;
65963+};
65964+
65965+/* End Data Structures Section */
65966+
65967+/* Hash functions generated by empirical testing by Brad Spengler
65968+ Makes good use of the low bits of the inode. Generally 0-1 times
65969+ in loop for successful match. 0-3 for unsuccessful match.
65970+ Shift/add algorithm with modulus of table size and an XOR*/
65971+
65972+static __inline__ unsigned int
65973+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
65974+{
65975+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
65976+}
65977+
65978+ static __inline__ unsigned int
65979+shash(const struct acl_subject_label *userp, const unsigned int sz)
65980+{
65981+ return ((const unsigned long)userp % sz);
65982+}
65983+
65984+static __inline__ unsigned int
65985+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
65986+{
65987+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
65988+}
65989+
65990+static __inline__ unsigned int
65991+nhash(const char *name, const __u16 len, const unsigned int sz)
65992+{
65993+ return full_name_hash((const unsigned char *)name, len) % sz;
65994+}
65995+
65996+#define FOR_EACH_ROLE_START(role) \
65997+ role = role_list; \
65998+ while (role) {
65999+
66000+#define FOR_EACH_ROLE_END(role) \
66001+ role = role->prev; \
66002+ }
66003+
66004+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66005+ subj = NULL; \
66006+ iter = 0; \
66007+ while (iter < role->subj_hash_size) { \
66008+ if (subj == NULL) \
66009+ subj = role->subj_hash[iter]; \
66010+ if (subj == NULL) { \
66011+ iter++; \
66012+ continue; \
66013+ }
66014+
66015+#define FOR_EACH_SUBJECT_END(subj,iter) \
66016+ subj = subj->next; \
66017+ if (subj == NULL) \
66018+ iter++; \
66019+ }
66020+
66021+
66022+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66023+ subj = role->hash->first; \
66024+ while (subj != NULL) {
66025+
66026+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66027+ subj = subj->next; \
66028+ }
66029+
66030+#endif
66031+
66032diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
66033new file mode 100644
66034index 0000000..323ecf2
66035--- /dev/null
66036+++ b/include/linux/gralloc.h
66037@@ -0,0 +1,9 @@
66038+#ifndef __GRALLOC_H
66039+#define __GRALLOC_H
66040+
66041+void acl_free_all(void);
66042+int acl_alloc_stack_init(unsigned long size);
66043+void *acl_alloc(unsigned long len);
66044+void *acl_alloc_num(unsigned long num, unsigned long len);
66045+
66046+#endif
66047diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
66048new file mode 100644
66049index 0000000..70d6cd5
66050--- /dev/null
66051+++ b/include/linux/grdefs.h
66052@@ -0,0 +1,140 @@
66053+#ifndef GRDEFS_H
66054+#define GRDEFS_H
66055+
66056+/* Begin grsecurity status declarations */
66057+
66058+enum {
66059+ GR_READY = 0x01,
66060+ GR_STATUS_INIT = 0x00 // disabled state
66061+};
66062+
66063+/* Begin ACL declarations */
66064+
66065+/* Role flags */
66066+
66067+enum {
66068+ GR_ROLE_USER = 0x0001,
66069+ GR_ROLE_GROUP = 0x0002,
66070+ GR_ROLE_DEFAULT = 0x0004,
66071+ GR_ROLE_SPECIAL = 0x0008,
66072+ GR_ROLE_AUTH = 0x0010,
66073+ GR_ROLE_NOPW = 0x0020,
66074+ GR_ROLE_GOD = 0x0040,
66075+ GR_ROLE_LEARN = 0x0080,
66076+ GR_ROLE_TPE = 0x0100,
66077+ GR_ROLE_DOMAIN = 0x0200,
66078+ GR_ROLE_PAM = 0x0400,
66079+ GR_ROLE_PERSIST = 0x800
66080+};
66081+
66082+/* ACL Subject and Object mode flags */
66083+enum {
66084+ GR_DELETED = 0x80000000
66085+};
66086+
66087+/* ACL Object-only mode flags */
66088+enum {
66089+ GR_READ = 0x00000001,
66090+ GR_APPEND = 0x00000002,
66091+ GR_WRITE = 0x00000004,
66092+ GR_EXEC = 0x00000008,
66093+ GR_FIND = 0x00000010,
66094+ GR_INHERIT = 0x00000020,
66095+ GR_SETID = 0x00000040,
66096+ GR_CREATE = 0x00000080,
66097+ GR_DELETE = 0x00000100,
66098+ GR_LINK = 0x00000200,
66099+ GR_AUDIT_READ = 0x00000400,
66100+ GR_AUDIT_APPEND = 0x00000800,
66101+ GR_AUDIT_WRITE = 0x00001000,
66102+ GR_AUDIT_EXEC = 0x00002000,
66103+ GR_AUDIT_FIND = 0x00004000,
66104+ GR_AUDIT_INHERIT= 0x00008000,
66105+ GR_AUDIT_SETID = 0x00010000,
66106+ GR_AUDIT_CREATE = 0x00020000,
66107+ GR_AUDIT_DELETE = 0x00040000,
66108+ GR_AUDIT_LINK = 0x00080000,
66109+ GR_PTRACERD = 0x00100000,
66110+ GR_NOPTRACE = 0x00200000,
66111+ GR_SUPPRESS = 0x00400000,
66112+ GR_NOLEARN = 0x00800000,
66113+ GR_INIT_TRANSFER= 0x01000000
66114+};
66115+
66116+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
66117+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
66118+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
66119+
66120+/* ACL subject-only mode flags */
66121+enum {
66122+ GR_KILL = 0x00000001,
66123+ GR_VIEW = 0x00000002,
66124+ GR_PROTECTED = 0x00000004,
66125+ GR_LEARN = 0x00000008,
66126+ GR_OVERRIDE = 0x00000010,
66127+ /* just a placeholder, this mode is only used in userspace */
66128+ GR_DUMMY = 0x00000020,
66129+ GR_PROTSHM = 0x00000040,
66130+ GR_KILLPROC = 0x00000080,
66131+ GR_KILLIPPROC = 0x00000100,
66132+ /* just a placeholder, this mode is only used in userspace */
66133+ GR_NOTROJAN = 0x00000200,
66134+ GR_PROTPROCFD = 0x00000400,
66135+ GR_PROCACCT = 0x00000800,
66136+ GR_RELAXPTRACE = 0x00001000,
66137+ GR_NESTED = 0x00002000,
66138+ GR_INHERITLEARN = 0x00004000,
66139+ GR_PROCFIND = 0x00008000,
66140+ GR_POVERRIDE = 0x00010000,
66141+ GR_KERNELAUTH = 0x00020000,
66142+ GR_ATSECURE = 0x00040000,
66143+ GR_SHMEXEC = 0x00080000
66144+};
66145+
66146+enum {
66147+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
66148+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
66149+ GR_PAX_ENABLE_MPROTECT = 0x0004,
66150+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
66151+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
66152+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
66153+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
66154+ GR_PAX_DISABLE_MPROTECT = 0x0400,
66155+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
66156+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
66157+};
66158+
66159+enum {
66160+ GR_ID_USER = 0x01,
66161+ GR_ID_GROUP = 0x02,
66162+};
66163+
66164+enum {
66165+ GR_ID_ALLOW = 0x01,
66166+ GR_ID_DENY = 0x02,
66167+};
66168+
66169+#define GR_CRASH_RES 31
66170+#define GR_UIDTABLE_MAX 500
66171+
66172+/* begin resource learning section */
66173+enum {
66174+ GR_RLIM_CPU_BUMP = 60,
66175+ GR_RLIM_FSIZE_BUMP = 50000,
66176+ GR_RLIM_DATA_BUMP = 10000,
66177+ GR_RLIM_STACK_BUMP = 1000,
66178+ GR_RLIM_CORE_BUMP = 10000,
66179+ GR_RLIM_RSS_BUMP = 500000,
66180+ GR_RLIM_NPROC_BUMP = 1,
66181+ GR_RLIM_NOFILE_BUMP = 5,
66182+ GR_RLIM_MEMLOCK_BUMP = 50000,
66183+ GR_RLIM_AS_BUMP = 500000,
66184+ GR_RLIM_LOCKS_BUMP = 2,
66185+ GR_RLIM_SIGPENDING_BUMP = 5,
66186+ GR_RLIM_MSGQUEUE_BUMP = 10000,
66187+ GR_RLIM_NICE_BUMP = 1,
66188+ GR_RLIM_RTPRIO_BUMP = 1,
66189+ GR_RLIM_RTTIME_BUMP = 1000000
66190+};
66191+
66192+#endif
66193diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
66194new file mode 100644
66195index 0000000..e5817d7
66196--- /dev/null
66197+++ b/include/linux/grinternal.h
66198@@ -0,0 +1,218 @@
66199+#ifndef __GRINTERNAL_H
66200+#define __GRINTERNAL_H
66201+
66202+#ifdef CONFIG_GRKERNSEC
66203+
66204+#include <linux/fs.h>
66205+#include <linux/mnt_namespace.h>
66206+#include <linux/nsproxy.h>
66207+#include <linux/gracl.h>
66208+#include <linux/grdefs.h>
66209+#include <linux/grmsg.h>
66210+
66211+void gr_add_learn_entry(const char *fmt, ...)
66212+ __attribute__ ((format (printf, 1, 2)));
66213+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
66214+ const struct vfsmount *mnt);
66215+__u32 gr_check_create(const struct dentry *new_dentry,
66216+ const struct dentry *parent,
66217+ const struct vfsmount *mnt, const __u32 mode);
66218+int gr_check_protected_task(const struct task_struct *task);
66219+__u32 to_gr_audit(const __u32 reqmode);
66220+int gr_set_acls(const int type);
66221+int gr_apply_subject_to_task(struct task_struct *task);
66222+int gr_acl_is_enabled(void);
66223+char gr_roletype_to_char(void);
66224+
66225+void gr_handle_alertkill(struct task_struct *task);
66226+char *gr_to_filename(const struct dentry *dentry,
66227+ const struct vfsmount *mnt);
66228+char *gr_to_filename1(const struct dentry *dentry,
66229+ const struct vfsmount *mnt);
66230+char *gr_to_filename2(const struct dentry *dentry,
66231+ const struct vfsmount *mnt);
66232+char *gr_to_filename3(const struct dentry *dentry,
66233+ const struct vfsmount *mnt);
66234+
66235+extern int grsec_enable_harden_ptrace;
66236+extern int grsec_enable_link;
66237+extern int grsec_enable_fifo;
66238+extern int grsec_enable_shm;
66239+extern int grsec_enable_execlog;
66240+extern int grsec_enable_signal;
66241+extern int grsec_enable_audit_ptrace;
66242+extern int grsec_enable_forkfail;
66243+extern int grsec_enable_time;
66244+extern int grsec_enable_rofs;
66245+extern int grsec_enable_chroot_shmat;
66246+extern int grsec_enable_chroot_mount;
66247+extern int grsec_enable_chroot_double;
66248+extern int grsec_enable_chroot_pivot;
66249+extern int grsec_enable_chroot_chdir;
66250+extern int grsec_enable_chroot_chmod;
66251+extern int grsec_enable_chroot_mknod;
66252+extern int grsec_enable_chroot_fchdir;
66253+extern int grsec_enable_chroot_nice;
66254+extern int grsec_enable_chroot_execlog;
66255+extern int grsec_enable_chroot_caps;
66256+extern int grsec_enable_chroot_sysctl;
66257+extern int grsec_enable_chroot_unix;
66258+extern int grsec_enable_tpe;
66259+extern int grsec_tpe_gid;
66260+extern int grsec_enable_tpe_all;
66261+extern int grsec_enable_tpe_invert;
66262+extern int grsec_enable_socket_all;
66263+extern int grsec_socket_all_gid;
66264+extern int grsec_enable_socket_client;
66265+extern int grsec_socket_client_gid;
66266+extern int grsec_enable_socket_server;
66267+extern int grsec_socket_server_gid;
66268+extern int grsec_audit_gid;
66269+extern int grsec_enable_group;
66270+extern int grsec_enable_audit_textrel;
66271+extern int grsec_enable_log_rwxmaps;
66272+extern int grsec_enable_mount;
66273+extern int grsec_enable_chdir;
66274+extern int grsec_resource_logging;
66275+extern int grsec_enable_blackhole;
66276+extern int grsec_lastack_retries;
66277+extern int grsec_enable_brute;
66278+extern int grsec_lock;
66279+
66280+extern spinlock_t grsec_alert_lock;
66281+extern unsigned long grsec_alert_wtime;
66282+extern unsigned long grsec_alert_fyet;
66283+
66284+extern spinlock_t grsec_audit_lock;
66285+
66286+extern rwlock_t grsec_exec_file_lock;
66287+
66288+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
66289+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
66290+ (tsk)->exec_file->f_vfsmnt) : "/")
66291+
66292+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
66293+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
66294+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66295+
66296+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
66297+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
66298+ (tsk)->exec_file->f_vfsmnt) : "/")
66299+
66300+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
66301+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
66302+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66303+
66304+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
66305+
66306+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
66307+
66308+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
66309+ (task)->pid, (cred)->uid, \
66310+ (cred)->euid, (cred)->gid, (cred)->egid, \
66311+ gr_parent_task_fullpath(task), \
66312+ (task)->real_parent->comm, (task)->real_parent->pid, \
66313+ (pcred)->uid, (pcred)->euid, \
66314+ (pcred)->gid, (pcred)->egid
66315+
66316+#define GR_CHROOT_CAPS {{ \
66317+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
66318+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
66319+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
66320+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
66321+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
66322+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
66323+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
66324+
66325+#define security_learn(normal_msg,args...) \
66326+({ \
66327+ read_lock(&grsec_exec_file_lock); \
66328+ gr_add_learn_entry(normal_msg "\n", ## args); \
66329+ read_unlock(&grsec_exec_file_lock); \
66330+})
66331+
66332+enum {
66333+ GR_DO_AUDIT,
66334+ GR_DONT_AUDIT,
66335+ GR_DONT_AUDIT_GOOD
66336+};
66337+
66338+enum {
66339+ GR_TTYSNIFF,
66340+ GR_RBAC,
66341+ GR_RBAC_STR,
66342+ GR_STR_RBAC,
66343+ GR_RBAC_MODE2,
66344+ GR_RBAC_MODE3,
66345+ GR_FILENAME,
66346+ GR_SYSCTL_HIDDEN,
66347+ GR_NOARGS,
66348+ GR_ONE_INT,
66349+ GR_ONE_INT_TWO_STR,
66350+ GR_ONE_STR,
66351+ GR_STR_INT,
66352+ GR_TWO_STR_INT,
66353+ GR_TWO_INT,
66354+ GR_TWO_U64,
66355+ GR_THREE_INT,
66356+ GR_FIVE_INT_TWO_STR,
66357+ GR_TWO_STR,
66358+ GR_THREE_STR,
66359+ GR_FOUR_STR,
66360+ GR_STR_FILENAME,
66361+ GR_FILENAME_STR,
66362+ GR_FILENAME_TWO_INT,
66363+ GR_FILENAME_TWO_INT_STR,
66364+ GR_TEXTREL,
66365+ GR_PTRACE,
66366+ GR_RESOURCE,
66367+ GR_CAP,
66368+ GR_SIG,
66369+ GR_SIG2,
66370+ GR_CRASH1,
66371+ GR_CRASH2,
66372+ GR_PSACCT,
66373+ GR_RWXMAP
66374+};
66375+
66376+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
66377+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
66378+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
66379+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
66380+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
66381+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
66382+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
66383+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
66384+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
66385+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
66386+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
66387+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
66388+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
66389+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
66390+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
66391+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
66392+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
66393+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
66394+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
66395+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
66396+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
66397+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
66398+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
66399+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
66400+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
66401+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
66402+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
66403+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
66404+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
66405+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
66406+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
66407+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
66408+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
66409+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
66410+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
66411+
66412+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
66413+
66414+#endif
66415+
66416+#endif
66417diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
66418new file mode 100644
66419index 0000000..9d5fd4a
66420--- /dev/null
66421+++ b/include/linux/grmsg.h
66422@@ -0,0 +1,108 @@
66423+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
66424+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
66425+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
66426+#define GR_STOPMOD_MSG "denied modification of module state by "
66427+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
66428+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
66429+#define GR_IOPERM_MSG "denied use of ioperm() by "
66430+#define GR_IOPL_MSG "denied use of iopl() by "
66431+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
66432+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
66433+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
66434+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
66435+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
66436+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
66437+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
66438+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
66439+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
66440+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
66441+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
66442+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
66443+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
66444+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
66445+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
66446+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
66447+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
66448+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
66449+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
66450+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
66451+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
66452+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
66453+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
66454+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
66455+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
66456+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
66457+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
66458+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
66459+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
66460+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
66461+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
66462+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
66463+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
66464+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
66465+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
66466+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
66467+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
66468+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
66469+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
66470+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
66471+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
66472+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
66473+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
66474+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
66475+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
66476+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
66477+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
66478+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
66479+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
66480+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
66481+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
66482+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
66483+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
66484+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
66485+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
66486+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
66487+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
66488+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
66489+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
66490+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
66491+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
66492+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
66493+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
66494+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
66495+#define GR_FAILFORK_MSG "failed fork with errno %s by "
66496+#define GR_NICE_CHROOT_MSG "denied priority change by "
66497+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
66498+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
66499+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
66500+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
66501+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
66502+#define GR_TIME_MSG "time set by "
66503+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
66504+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
66505+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
66506+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
66507+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
66508+#define GR_BIND_MSG "denied bind() by "
66509+#define GR_CONNECT_MSG "denied connect() by "
66510+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
66511+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
66512+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
66513+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
66514+#define GR_CAP_ACL_MSG "use of %s denied for "
66515+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
66516+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
66517+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
66518+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
66519+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
66520+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
66521+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
66522+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
66523+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
66524+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
66525+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
66526+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
66527+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
66528+#define GR_VM86_MSG "denied use of vm86 by "
66529+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
66530+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
66531diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
66532new file mode 100644
66533index 0000000..24676f4
66534--- /dev/null
66535+++ b/include/linux/grsecurity.h
66536@@ -0,0 +1,218 @@
66537+#ifndef GR_SECURITY_H
66538+#define GR_SECURITY_H
66539+#include <linux/fs.h>
66540+#include <linux/fs_struct.h>
66541+#include <linux/binfmts.h>
66542+#include <linux/gracl.h>
66543+#include <linux/compat.h>
66544+
66545+/* notify of brain-dead configs */
66546+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66547+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
66548+#endif
66549+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
66550+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
66551+#endif
66552+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
66553+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
66554+#endif
66555+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
66556+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
66557+#endif
66558+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
66559+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
66560+#endif
66561+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
66562+#error "CONFIG_PAX enabled, but no PaX options are enabled."
66563+#endif
66564+
66565+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
66566+void gr_handle_brute_check(void);
66567+void gr_handle_kernel_exploit(void);
66568+int gr_process_user_ban(void);
66569+
66570+char gr_roletype_to_char(void);
66571+
66572+int gr_acl_enable_at_secure(void);
66573+
66574+int gr_check_user_change(int real, int effective, int fs);
66575+int gr_check_group_change(int real, int effective, int fs);
66576+
66577+void gr_del_task_from_ip_table(struct task_struct *p);
66578+
66579+int gr_pid_is_chrooted(struct task_struct *p);
66580+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
66581+int gr_handle_chroot_nice(void);
66582+int gr_handle_chroot_sysctl(const int op);
66583+int gr_handle_chroot_setpriority(struct task_struct *p,
66584+ const int niceval);
66585+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
66586+int gr_handle_chroot_chroot(const struct dentry *dentry,
66587+ const struct vfsmount *mnt);
66588+void gr_handle_chroot_chdir(struct path *path);
66589+int gr_handle_chroot_chmod(const struct dentry *dentry,
66590+ const struct vfsmount *mnt, const int mode);
66591+int gr_handle_chroot_mknod(const struct dentry *dentry,
66592+ const struct vfsmount *mnt, const int mode);
66593+int gr_handle_chroot_mount(const struct dentry *dentry,
66594+ const struct vfsmount *mnt,
66595+ const char *dev_name);
66596+int gr_handle_chroot_pivot(void);
66597+int gr_handle_chroot_unix(const pid_t pid);
66598+
66599+int gr_handle_rawio(const struct inode *inode);
66600+
66601+void gr_handle_ioperm(void);
66602+void gr_handle_iopl(void);
66603+
66604+int gr_tpe_allow(const struct file *file);
66605+
66606+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
66607+void gr_clear_chroot_entries(struct task_struct *task);
66608+
66609+void gr_log_forkfail(const int retval);
66610+void gr_log_timechange(void);
66611+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
66612+void gr_log_chdir(const struct dentry *dentry,
66613+ const struct vfsmount *mnt);
66614+void gr_log_chroot_exec(const struct dentry *dentry,
66615+ const struct vfsmount *mnt);
66616+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
66617+#ifdef CONFIG_COMPAT
66618+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
66619+#endif
66620+void gr_log_remount(const char *devname, const int retval);
66621+void gr_log_unmount(const char *devname, const int retval);
66622+void gr_log_mount(const char *from, const char *to, const int retval);
66623+void gr_log_textrel(struct vm_area_struct *vma);
66624+void gr_log_rwxmmap(struct file *file);
66625+void gr_log_rwxmprotect(struct file *file);
66626+
66627+int gr_handle_follow_link(const struct inode *parent,
66628+ const struct inode *inode,
66629+ const struct dentry *dentry,
66630+ const struct vfsmount *mnt);
66631+int gr_handle_fifo(const struct dentry *dentry,
66632+ const struct vfsmount *mnt,
66633+ const struct dentry *dir, const int flag,
66634+ const int acc_mode);
66635+int gr_handle_hardlink(const struct dentry *dentry,
66636+ const struct vfsmount *mnt,
66637+ struct inode *inode,
66638+ const int mode, const char *to);
66639+
66640+int gr_is_capable(const int cap);
66641+int gr_is_capable_nolog(const int cap);
66642+void gr_learn_resource(const struct task_struct *task, const int limit,
66643+ const unsigned long wanted, const int gt);
66644+void gr_copy_label(struct task_struct *tsk);
66645+void gr_handle_crash(struct task_struct *task, const int sig);
66646+int gr_handle_signal(const struct task_struct *p, const int sig);
66647+int gr_check_crash_uid(const uid_t uid);
66648+int gr_check_protected_task(const struct task_struct *task);
66649+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
66650+int gr_acl_handle_mmap(const struct file *file,
66651+ const unsigned long prot);
66652+int gr_acl_handle_mprotect(const struct file *file,
66653+ const unsigned long prot);
66654+int gr_check_hidden_task(const struct task_struct *tsk);
66655+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
66656+ const struct vfsmount *mnt);
66657+__u32 gr_acl_handle_utime(const struct dentry *dentry,
66658+ const struct vfsmount *mnt);
66659+__u32 gr_acl_handle_access(const struct dentry *dentry,
66660+ const struct vfsmount *mnt, const int fmode);
66661+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
66662+ const struct vfsmount *mnt, mode_t mode);
66663+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
66664+ const struct vfsmount *mnt, mode_t mode);
66665+__u32 gr_acl_handle_chown(const struct dentry *dentry,
66666+ const struct vfsmount *mnt);
66667+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
66668+ const struct vfsmount *mnt);
66669+int gr_handle_ptrace(struct task_struct *task, const long request);
66670+int gr_handle_proc_ptrace(struct task_struct *task);
66671+__u32 gr_acl_handle_execve(const struct dentry *dentry,
66672+ const struct vfsmount *mnt);
66673+int gr_check_crash_exec(const struct file *filp);
66674+int gr_acl_is_enabled(void);
66675+void gr_set_kernel_label(struct task_struct *task);
66676+void gr_set_role_label(struct task_struct *task, const uid_t uid,
66677+ const gid_t gid);
66678+int gr_set_proc_label(const struct dentry *dentry,
66679+ const struct vfsmount *mnt,
66680+ const int unsafe_share);
66681+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
66682+ const struct vfsmount *mnt);
66683+__u32 gr_acl_handle_open(const struct dentry *dentry,
66684+ const struct vfsmount *mnt, int acc_mode);
66685+__u32 gr_acl_handle_creat(const struct dentry *dentry,
66686+ const struct dentry *p_dentry,
66687+ const struct vfsmount *p_mnt,
66688+ int open_flags, int acc_mode, const int imode);
66689+void gr_handle_create(const struct dentry *dentry,
66690+ const struct vfsmount *mnt);
66691+void gr_handle_proc_create(const struct dentry *dentry,
66692+ const struct inode *inode);
66693+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
66694+ const struct dentry *parent_dentry,
66695+ const struct vfsmount *parent_mnt,
66696+ const int mode);
66697+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
66698+ const struct dentry *parent_dentry,
66699+ const struct vfsmount *parent_mnt);
66700+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
66701+ const struct vfsmount *mnt);
66702+void gr_handle_delete(const ino_t ino, const dev_t dev);
66703+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
66704+ const struct vfsmount *mnt);
66705+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
66706+ const struct dentry *parent_dentry,
66707+ const struct vfsmount *parent_mnt,
66708+ const char *from);
66709+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
66710+ const struct dentry *parent_dentry,
66711+ const struct vfsmount *parent_mnt,
66712+ const struct dentry *old_dentry,
66713+ const struct vfsmount *old_mnt, const char *to);
66714+int gr_acl_handle_rename(struct dentry *new_dentry,
66715+ struct dentry *parent_dentry,
66716+ const struct vfsmount *parent_mnt,
66717+ struct dentry *old_dentry,
66718+ struct inode *old_parent_inode,
66719+ struct vfsmount *old_mnt, const char *newname);
66720+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
66721+ struct dentry *old_dentry,
66722+ struct dentry *new_dentry,
66723+ struct vfsmount *mnt, const __u8 replace);
66724+__u32 gr_check_link(const struct dentry *new_dentry,
66725+ const struct dentry *parent_dentry,
66726+ const struct vfsmount *parent_mnt,
66727+ const struct dentry *old_dentry,
66728+ const struct vfsmount *old_mnt);
66729+int gr_acl_handle_filldir(const struct file *file, const char *name,
66730+ const unsigned int namelen, const ino_t ino);
66731+
66732+__u32 gr_acl_handle_unix(const struct dentry *dentry,
66733+ const struct vfsmount *mnt);
66734+void gr_acl_handle_exit(void);
66735+void gr_acl_handle_psacct(struct task_struct *task, const long code);
66736+int gr_acl_handle_procpidmem(const struct task_struct *task);
66737+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
66738+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
66739+void gr_audit_ptrace(struct task_struct *task);
66740+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
66741+
66742+#ifdef CONFIG_GRKERNSEC
66743+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
66744+void gr_handle_vm86(void);
66745+void gr_handle_mem_readwrite(u64 from, u64 to);
66746+
66747+extern int grsec_enable_dmesg;
66748+extern int grsec_disable_privio;
66749+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66750+extern int grsec_enable_chroot_findtask;
66751+#endif
66752+#endif
66753+
66754+#endif
66755diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
66756index 6a87154..a3ce57b 100644
66757--- a/include/linux/hdpu_features.h
66758+++ b/include/linux/hdpu_features.h
66759@@ -3,7 +3,7 @@
66760 struct cpustate_t {
66761 spinlock_t lock;
66762 int excl;
66763- int open_count;
66764+ atomic_t open_count;
66765 unsigned char cached_val;
66766 int inited;
66767 unsigned long *set_addr;
66768diff --git a/include/linux/highmem.h b/include/linux/highmem.h
66769index 211ff44..00ab6d7 100644
66770--- a/include/linux/highmem.h
66771+++ b/include/linux/highmem.h
66772@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
66773 kunmap_atomic(kaddr, KM_USER0);
66774 }
66775
66776+static inline void sanitize_highpage(struct page *page)
66777+{
66778+ void *kaddr;
66779+ unsigned long flags;
66780+
66781+ local_irq_save(flags);
66782+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
66783+ clear_page(kaddr);
66784+ kunmap_atomic(kaddr, KM_CLEARPAGE);
66785+ local_irq_restore(flags);
66786+}
66787+
66788 static inline void zero_user_segments(struct page *page,
66789 unsigned start1, unsigned end1,
66790 unsigned start2, unsigned end2)
66791diff --git a/include/linux/i2c.h b/include/linux/i2c.h
66792index 7b40cda..24eb44e 100644
66793--- a/include/linux/i2c.h
66794+++ b/include/linux/i2c.h
66795@@ -325,6 +325,7 @@ struct i2c_algorithm {
66796 /* To determine what the adapter supports */
66797 u32 (*functionality) (struct i2c_adapter *);
66798 };
66799+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
66800
66801 /*
66802 * i2c_adapter is the structure used to identify a physical i2c bus along
66803diff --git a/include/linux/i2o.h b/include/linux/i2o.h
66804index 4c4e57d..f3c5303 100644
66805--- a/include/linux/i2o.h
66806+++ b/include/linux/i2o.h
66807@@ -564,7 +564,7 @@ struct i2o_controller {
66808 struct i2o_device *exec; /* Executive */
66809 #if BITS_PER_LONG == 64
66810 spinlock_t context_list_lock; /* lock for context_list */
66811- atomic_t context_list_counter; /* needed for unique contexts */
66812+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
66813 struct list_head context_list; /* list of context id's
66814 and pointers */
66815 #endif
66816diff --git a/include/linux/init_task.h b/include/linux/init_task.h
66817index 21a6f5d..dc42eab 100644
66818--- a/include/linux/init_task.h
66819+++ b/include/linux/init_task.h
66820@@ -83,6 +83,12 @@ extern struct group_info init_groups;
66821 #define INIT_IDS
66822 #endif
66823
66824+#ifdef CONFIG_X86
66825+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
66826+#else
66827+#define INIT_TASK_THREAD_INFO
66828+#endif
66829+
66830 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
66831 /*
66832 * Because of the reduced scope of CAP_SETPCAP when filesystem
66833@@ -156,6 +162,7 @@ extern struct cred init_cred;
66834 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
66835 .comm = "swapper", \
66836 .thread = INIT_THREAD, \
66837+ INIT_TASK_THREAD_INFO \
66838 .fs = &init_fs, \
66839 .files = &init_files, \
66840 .signal = &init_signals, \
66841diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
66842index 4f0a72a..a849599 100644
66843--- a/include/linux/intel-iommu.h
66844+++ b/include/linux/intel-iommu.h
66845@@ -296,7 +296,7 @@ struct iommu_flush {
66846 u8 fm, u64 type);
66847 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
66848 unsigned int size_order, u64 type);
66849-};
66850+} __no_const;
66851
66852 enum {
66853 SR_DMAR_FECTL_REG,
66854diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
66855index c739150..be577b5 100644
66856--- a/include/linux/interrupt.h
66857+++ b/include/linux/interrupt.h
66858@@ -369,7 +369,7 @@ enum
66859 /* map softirq index to softirq name. update 'softirq_to_name' in
66860 * kernel/softirq.c when adding a new softirq.
66861 */
66862-extern char *softirq_to_name[NR_SOFTIRQS];
66863+extern const char * const softirq_to_name[NR_SOFTIRQS];
66864
66865 /* softirq mask and active fields moved to irq_cpustat_t in
66866 * asm/hardirq.h to get better cache usage. KAO
66867@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
66868
66869 struct softirq_action
66870 {
66871- void (*action)(struct softirq_action *);
66872+ void (*action)(void);
66873 };
66874
66875 asmlinkage void do_softirq(void);
66876 asmlinkage void __do_softirq(void);
66877-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
66878+extern void open_softirq(int nr, void (*action)(void));
66879 extern void softirq_init(void);
66880 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
66881 extern void raise_softirq_irqoff(unsigned int nr);
66882diff --git a/include/linux/irq.h b/include/linux/irq.h
66883index 9e5f45a..025865b 100644
66884--- a/include/linux/irq.h
66885+++ b/include/linux/irq.h
66886@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
66887 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
66888 bool boot)
66889 {
66890+#ifdef CONFIG_CPUMASK_OFFSTACK
66891 gfp_t gfp = GFP_ATOMIC;
66892
66893 if (boot)
66894 gfp = GFP_NOWAIT;
66895
66896-#ifdef CONFIG_CPUMASK_OFFSTACK
66897 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
66898 return false;
66899
66900diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
66901index 7922742..27306a2 100644
66902--- a/include/linux/kallsyms.h
66903+++ b/include/linux/kallsyms.h
66904@@ -15,7 +15,8 @@
66905
66906 struct module;
66907
66908-#ifdef CONFIG_KALLSYMS
66909+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
66910+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66911 /* Lookup the address for a symbol. Returns 0 if not found. */
66912 unsigned long kallsyms_lookup_name(const char *name);
66913
66914@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
66915 /* Stupid that this does nothing, but I didn't create this mess. */
66916 #define __print_symbol(fmt, addr)
66917 #endif /*CONFIG_KALLSYMS*/
66918+#else /* when included by kallsyms.c, vsnprintf.c, or
66919+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
66920+extern void __print_symbol(const char *fmt, unsigned long address);
66921+extern int sprint_symbol(char *buffer, unsigned long address);
66922+const char *kallsyms_lookup(unsigned long addr,
66923+ unsigned long *symbolsize,
66924+ unsigned long *offset,
66925+ char **modname, char *namebuf);
66926+#endif
66927
66928 /* This macro allows us to keep printk typechecking */
66929 static void __check_printsym_format(const char *fmt, ...)
66930diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
66931index 6adcc29..13369e8 100644
66932--- a/include/linux/kgdb.h
66933+++ b/include/linux/kgdb.h
66934@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
66935
66936 extern int kgdb_connected;
66937
66938-extern atomic_t kgdb_setting_breakpoint;
66939-extern atomic_t kgdb_cpu_doing_single_step;
66940+extern atomic_unchecked_t kgdb_setting_breakpoint;
66941+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
66942
66943 extern struct task_struct *kgdb_usethread;
66944 extern struct task_struct *kgdb_contthread;
66945@@ -235,7 +235,7 @@ struct kgdb_arch {
66946 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
66947 void (*remove_all_hw_break)(void);
66948 void (*correct_hw_break)(void);
66949-};
66950+} __do_const;
66951
66952 /**
66953 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
66954@@ -257,14 +257,14 @@ struct kgdb_io {
66955 int (*init) (void);
66956 void (*pre_exception) (void);
66957 void (*post_exception) (void);
66958-};
66959+} __do_const;
66960
66961-extern struct kgdb_arch arch_kgdb_ops;
66962+extern const struct kgdb_arch arch_kgdb_ops;
66963
66964 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
66965
66966-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
66967-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
66968+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
66969+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
66970
66971 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
66972 extern int kgdb_mem2hex(char *mem, char *buf, int count);
66973diff --git a/include/linux/kmod.h b/include/linux/kmod.h
66974index 384ca8b..83dd97d 100644
66975--- a/include/linux/kmod.h
66976+++ b/include/linux/kmod.h
66977@@ -31,6 +31,8 @@
66978 * usually useless though. */
66979 extern int __request_module(bool wait, const char *name, ...) \
66980 __attribute__((format(printf, 2, 3)));
66981+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
66982+ __attribute__((format(printf, 3, 4)));
66983 #define request_module(mod...) __request_module(true, mod)
66984 #define request_module_nowait(mod...) __request_module(false, mod)
66985 #define try_then_request_module(x, mod...) \
66986diff --git a/include/linux/kobject.h b/include/linux/kobject.h
66987index 58ae8e0..3950d3c 100644
66988--- a/include/linux/kobject.h
66989+++ b/include/linux/kobject.h
66990@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
66991
66992 struct kobj_type {
66993 void (*release)(struct kobject *kobj);
66994- struct sysfs_ops *sysfs_ops;
66995+ const struct sysfs_ops *sysfs_ops;
66996 struct attribute **default_attrs;
66997 };
66998
66999@@ -118,9 +118,9 @@ struct kobj_uevent_env {
67000 };
67001
67002 struct kset_uevent_ops {
67003- int (*filter)(struct kset *kset, struct kobject *kobj);
67004- const char *(*name)(struct kset *kset, struct kobject *kobj);
67005- int (*uevent)(struct kset *kset, struct kobject *kobj,
67006+ int (* const filter)(struct kset *kset, struct kobject *kobj);
67007+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
67008+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
67009 struct kobj_uevent_env *env);
67010 };
67011
67012@@ -132,7 +132,7 @@ struct kobj_attribute {
67013 const char *buf, size_t count);
67014 };
67015
67016-extern struct sysfs_ops kobj_sysfs_ops;
67017+extern const struct sysfs_ops kobj_sysfs_ops;
67018
67019 /**
67020 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67021@@ -155,14 +155,14 @@ struct kset {
67022 struct list_head list;
67023 spinlock_t list_lock;
67024 struct kobject kobj;
67025- struct kset_uevent_ops *uevent_ops;
67026+ const struct kset_uevent_ops *uevent_ops;
67027 };
67028
67029 extern void kset_init(struct kset *kset);
67030 extern int __must_check kset_register(struct kset *kset);
67031 extern void kset_unregister(struct kset *kset);
67032 extern struct kset * __must_check kset_create_and_add(const char *name,
67033- struct kset_uevent_ops *u,
67034+ const struct kset_uevent_ops *u,
67035 struct kobject *parent_kobj);
67036
67037 static inline struct kset *to_kset(struct kobject *kobj)
67038diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
67039index c728a50..752d821 100644
67040--- a/include/linux/kvm_host.h
67041+++ b/include/linux/kvm_host.h
67042@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
67043 void vcpu_load(struct kvm_vcpu *vcpu);
67044 void vcpu_put(struct kvm_vcpu *vcpu);
67045
67046-int kvm_init(void *opaque, unsigned int vcpu_size,
67047+int kvm_init(const void *opaque, unsigned int vcpu_size,
67048 struct module *module);
67049 void kvm_exit(void);
67050
67051@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
67052 struct kvm_guest_debug *dbg);
67053 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
67054
67055-int kvm_arch_init(void *opaque);
67056+int kvm_arch_init(const void *opaque);
67057 void kvm_arch_exit(void);
67058
67059 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
67060diff --git a/include/linux/libata.h b/include/linux/libata.h
67061index a069916..223edde 100644
67062--- a/include/linux/libata.h
67063+++ b/include/linux/libata.h
67064@@ -525,11 +525,11 @@ struct ata_ioports {
67065
67066 struct ata_host {
67067 spinlock_t lock;
67068- struct device *dev;
67069+ struct device *dev;
67070 void __iomem * const *iomap;
67071 unsigned int n_ports;
67072 void *private_data;
67073- struct ata_port_operations *ops;
67074+ const struct ata_port_operations *ops;
67075 unsigned long flags;
67076 #ifdef CONFIG_ATA_ACPI
67077 acpi_handle acpi_handle;
67078@@ -710,7 +710,7 @@ struct ata_link {
67079
67080 struct ata_port {
67081 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
67082- struct ata_port_operations *ops;
67083+ const struct ata_port_operations *ops;
67084 spinlock_t *lock;
67085 /* Flags owned by the EH context. Only EH should touch these once the
67086 port is active */
67087@@ -884,7 +884,7 @@ struct ata_port_operations {
67088 * fields must be pointers.
67089 */
67090 const struct ata_port_operations *inherits;
67091-};
67092+} __do_const;
67093
67094 struct ata_port_info {
67095 unsigned long flags;
67096@@ -892,7 +892,7 @@ struct ata_port_info {
67097 unsigned long pio_mask;
67098 unsigned long mwdma_mask;
67099 unsigned long udma_mask;
67100- struct ata_port_operations *port_ops;
67101+ const struct ata_port_operations *port_ops;
67102 void *private_data;
67103 };
67104
67105@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
67106 extern const unsigned long sata_deb_timing_hotplug[];
67107 extern const unsigned long sata_deb_timing_long[];
67108
67109-extern struct ata_port_operations ata_dummy_port_ops;
67110+extern const struct ata_port_operations ata_dummy_port_ops;
67111 extern const struct ata_port_info ata_dummy_port_info;
67112
67113 static inline const unsigned long *
67114@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
67115 struct scsi_host_template *sht);
67116 extern void ata_host_detach(struct ata_host *host);
67117 extern void ata_host_init(struct ata_host *, struct device *,
67118- unsigned long, struct ata_port_operations *);
67119+ unsigned long, const struct ata_port_operations *);
67120 extern int ata_scsi_detect(struct scsi_host_template *sht);
67121 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
67122 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
67123diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
67124index fbc48f8..0886e57 100644
67125--- a/include/linux/lockd/bind.h
67126+++ b/include/linux/lockd/bind.h
67127@@ -23,13 +23,13 @@ struct svc_rqst;
67128 * This is the set of functions for lockd->nfsd communication
67129 */
67130 struct nlmsvc_binding {
67131- __be32 (*fopen)(struct svc_rqst *,
67132+ __be32 (* const fopen)(struct svc_rqst *,
67133 struct nfs_fh *,
67134 struct file **);
67135- void (*fclose)(struct file *);
67136+ void (* const fclose)(struct file *);
67137 };
67138
67139-extern struct nlmsvc_binding * nlmsvc_ops;
67140+extern const struct nlmsvc_binding * nlmsvc_ops;
67141
67142 /*
67143 * Similar to nfs_client_initdata, but without the NFS-specific
67144diff --git a/include/linux/mca.h b/include/linux/mca.h
67145index 3797270..7765ede 100644
67146--- a/include/linux/mca.h
67147+++ b/include/linux/mca.h
67148@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
67149 int region);
67150 void * (*mca_transform_memory)(struct mca_device *,
67151 void *memory);
67152-};
67153+} __no_const;
67154
67155 struct mca_bus {
67156 u64 default_dma_mask;
67157diff --git a/include/linux/memory.h b/include/linux/memory.h
67158index 37fa19b..b597c85 100644
67159--- a/include/linux/memory.h
67160+++ b/include/linux/memory.h
67161@@ -108,7 +108,7 @@ struct memory_accessor {
67162 size_t count);
67163 ssize_t (*write)(struct memory_accessor *, const char *buf,
67164 off_t offset, size_t count);
67165-};
67166+} __no_const;
67167
67168 /*
67169 * Kernel text modification mutex, used for code patching. Users of this lock
67170diff --git a/include/linux/mm.h b/include/linux/mm.h
67171index 11e5be6..1ff2423 100644
67172--- a/include/linux/mm.h
67173+++ b/include/linux/mm.h
67174@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
67175
67176 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
67177 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
67178+
67179+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67180+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
67181+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
67182+#else
67183 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
67184+#endif
67185+
67186 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
67187 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
67188
67189@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
67190 int set_page_dirty_lock(struct page *page);
67191 int clear_page_dirty_for_io(struct page *page);
67192
67193-/* Is the vma a continuation of the stack vma above it? */
67194-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
67195-{
67196- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
67197-}
67198-
67199 extern unsigned long move_page_tables(struct vm_area_struct *vma,
67200 unsigned long old_addr, struct vm_area_struct *new_vma,
67201 unsigned long new_addr, unsigned long len);
67202@@ -890,6 +891,8 @@ struct shrinker {
67203 extern void register_shrinker(struct shrinker *);
67204 extern void unregister_shrinker(struct shrinker *);
67205
67206+pgprot_t vm_get_page_prot(unsigned long vm_flags);
67207+
67208 int vma_wants_writenotify(struct vm_area_struct *vma);
67209
67210 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
67211@@ -1162,6 +1165,7 @@ out:
67212 }
67213
67214 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
67215+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
67216
67217 extern unsigned long do_brk(unsigned long, unsigned long);
67218
67219@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
67220 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
67221 struct vm_area_struct **pprev);
67222
67223+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
67224+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
67225+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
67226+
67227 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
67228 NULL if none. Assume start_addr < end_addr. */
67229 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
67230@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
67231 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
67232 }
67233
67234-pgprot_t vm_get_page_prot(unsigned long vm_flags);
67235 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
67236 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
67237 unsigned long pfn, unsigned long size, pgprot_t);
67238@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
67239 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
67240 extern int sysctl_memory_failure_early_kill;
67241 extern int sysctl_memory_failure_recovery;
67242-extern atomic_long_t mce_bad_pages;
67243+extern atomic_long_unchecked_t mce_bad_pages;
67244+
67245+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67246+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
67247+#else
67248+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
67249+#endif
67250
67251 #endif /* __KERNEL__ */
67252 #endif /* _LINUX_MM_H */
67253diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
67254index 9d12ed5..8023125 100644
67255--- a/include/linux/mm_types.h
67256+++ b/include/linux/mm_types.h
67257@@ -186,6 +186,8 @@ struct vm_area_struct {
67258 #ifdef CONFIG_NUMA
67259 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
67260 #endif
67261+
67262+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
67263 };
67264
67265 struct core_thread {
67266@@ -287,6 +289,24 @@ struct mm_struct {
67267 #ifdef CONFIG_MMU_NOTIFIER
67268 struct mmu_notifier_mm *mmu_notifier_mm;
67269 #endif
67270+
67271+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67272+ unsigned long pax_flags;
67273+#endif
67274+
67275+#ifdef CONFIG_PAX_DLRESOLVE
67276+ unsigned long call_dl_resolve;
67277+#endif
67278+
67279+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
67280+ unsigned long call_syscall;
67281+#endif
67282+
67283+#ifdef CONFIG_PAX_ASLR
67284+ unsigned long delta_mmap; /* randomized offset */
67285+ unsigned long delta_stack; /* randomized offset */
67286+#endif
67287+
67288 };
67289
67290 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
67291diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
67292index 4e02ee2..afb159e 100644
67293--- a/include/linux/mmu_notifier.h
67294+++ b/include/linux/mmu_notifier.h
67295@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
67296 */
67297 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
67298 ({ \
67299- pte_t __pte; \
67300+ pte_t ___pte; \
67301 struct vm_area_struct *___vma = __vma; \
67302 unsigned long ___address = __address; \
67303- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
67304+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
67305 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
67306- __pte; \
67307+ ___pte; \
67308 })
67309
67310 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
67311diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
67312index 6c31a2a..4b0e930 100644
67313--- a/include/linux/mmzone.h
67314+++ b/include/linux/mmzone.h
67315@@ -350,7 +350,7 @@ struct zone {
67316 unsigned long flags; /* zone flags, see below */
67317
67318 /* Zone statistics */
67319- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67320+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67321
67322 /*
67323 * prev_priority holds the scanning priority for this zone. It is
67324diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
67325index f58e9d8..3503935 100644
67326--- a/include/linux/mod_devicetable.h
67327+++ b/include/linux/mod_devicetable.h
67328@@ -12,7 +12,7 @@
67329 typedef unsigned long kernel_ulong_t;
67330 #endif
67331
67332-#define PCI_ANY_ID (~0)
67333+#define PCI_ANY_ID ((__u16)~0)
67334
67335 struct pci_device_id {
67336 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
67337@@ -131,7 +131,7 @@ struct usb_device_id {
67338 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
67339 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
67340
67341-#define HID_ANY_ID (~0)
67342+#define HID_ANY_ID (~0U)
67343
67344 struct hid_device_id {
67345 __u16 bus;
67346diff --git a/include/linux/module.h b/include/linux/module.h
67347index 482efc8..642032b 100644
67348--- a/include/linux/module.h
67349+++ b/include/linux/module.h
67350@@ -16,6 +16,7 @@
67351 #include <linux/kobject.h>
67352 #include <linux/moduleparam.h>
67353 #include <linux/tracepoint.h>
67354+#include <linux/fs.h>
67355
67356 #include <asm/local.h>
67357 #include <asm/module.h>
67358@@ -287,16 +288,16 @@ struct module
67359 int (*init)(void);
67360
67361 /* If this is non-NULL, vfree after init() returns */
67362- void *module_init;
67363+ void *module_init_rx, *module_init_rw;
67364
67365 /* Here is the actual code + data, vfree'd on unload. */
67366- void *module_core;
67367+ void *module_core_rx, *module_core_rw;
67368
67369 /* Here are the sizes of the init and core sections */
67370- unsigned int init_size, core_size;
67371+ unsigned int init_size_rw, core_size_rw;
67372
67373 /* The size of the executable code in each section. */
67374- unsigned int init_text_size, core_text_size;
67375+ unsigned int init_size_rx, core_size_rx;
67376
67377 /* Arch-specific module values */
67378 struct mod_arch_specific arch;
67379@@ -345,6 +346,10 @@ struct module
67380 #ifdef CONFIG_EVENT_TRACING
67381 struct ftrace_event_call *trace_events;
67382 unsigned int num_trace_events;
67383+ struct file_operations trace_id;
67384+ struct file_operations trace_enable;
67385+ struct file_operations trace_format;
67386+ struct file_operations trace_filter;
67387 #endif
67388 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
67389 unsigned long *ftrace_callsites;
67390@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
67391 bool is_module_address(unsigned long addr);
67392 bool is_module_text_address(unsigned long addr);
67393
67394+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
67395+{
67396+
67397+#ifdef CONFIG_PAX_KERNEXEC
67398+ if (ktla_ktva(addr) >= (unsigned long)start &&
67399+ ktla_ktva(addr) < (unsigned long)start + size)
67400+ return 1;
67401+#endif
67402+
67403+ return ((void *)addr >= start && (void *)addr < start + size);
67404+}
67405+
67406+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
67407+{
67408+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
67409+}
67410+
67411+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
67412+{
67413+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
67414+}
67415+
67416+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
67417+{
67418+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
67419+}
67420+
67421+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
67422+{
67423+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
67424+}
67425+
67426 static inline int within_module_core(unsigned long addr, struct module *mod)
67427 {
67428- return (unsigned long)mod->module_core <= addr &&
67429- addr < (unsigned long)mod->module_core + mod->core_size;
67430+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
67431 }
67432
67433 static inline int within_module_init(unsigned long addr, struct module *mod)
67434 {
67435- return (unsigned long)mod->module_init <= addr &&
67436- addr < (unsigned long)mod->module_init + mod->init_size;
67437+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
67438 }
67439
67440 /* Search for module by name: must hold module_mutex. */
67441diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
67442index c1f40c2..682ca53 100644
67443--- a/include/linux/moduleloader.h
67444+++ b/include/linux/moduleloader.h
67445@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
67446 sections. Returns NULL on failure. */
67447 void *module_alloc(unsigned long size);
67448
67449+#ifdef CONFIG_PAX_KERNEXEC
67450+void *module_alloc_exec(unsigned long size);
67451+#else
67452+#define module_alloc_exec(x) module_alloc(x)
67453+#endif
67454+
67455 /* Free memory returned from module_alloc. */
67456 void module_free(struct module *mod, void *module_region);
67457
67458+#ifdef CONFIG_PAX_KERNEXEC
67459+void module_free_exec(struct module *mod, void *module_region);
67460+#else
67461+#define module_free_exec(x, y) module_free((x), (y))
67462+#endif
67463+
67464 /* Apply the given relocation to the (simplified) ELF. Return -error
67465 or 0. */
67466 int apply_relocate(Elf_Shdr *sechdrs,
67467diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
67468index 82a9124..8a5f622 100644
67469--- a/include/linux/moduleparam.h
67470+++ b/include/linux/moduleparam.h
67471@@ -132,7 +132,7 @@ struct kparam_array
67472
67473 /* Actually copy string: maxlen param is usually sizeof(string). */
67474 #define module_param_string(name, string, len, perm) \
67475- static const struct kparam_string __param_string_##name \
67476+ static const struct kparam_string __param_string_##name __used \
67477 = { len, string }; \
67478 __module_param_call(MODULE_PARAM_PREFIX, name, \
67479 param_set_copystring, param_get_string, \
67480@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
67481
67482 /* Comma-separated array: *nump is set to number they actually specified. */
67483 #define module_param_array_named(name, array, type, nump, perm) \
67484- static const struct kparam_array __param_arr_##name \
67485+ static const struct kparam_array __param_arr_##name __used \
67486 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
67487 sizeof(array[0]), array }; \
67488 __module_param_call(MODULE_PARAM_PREFIX, name, \
67489diff --git a/include/linux/mutex.h b/include/linux/mutex.h
67490index 878cab4..c92cb3e 100644
67491--- a/include/linux/mutex.h
67492+++ b/include/linux/mutex.h
67493@@ -51,7 +51,7 @@ struct mutex {
67494 spinlock_t wait_lock;
67495 struct list_head wait_list;
67496 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
67497- struct thread_info *owner;
67498+ struct task_struct *owner;
67499 #endif
67500 #ifdef CONFIG_DEBUG_MUTEXES
67501 const char *name;
67502diff --git a/include/linux/namei.h b/include/linux/namei.h
67503index ec0f607..d19e675 100644
67504--- a/include/linux/namei.h
67505+++ b/include/linux/namei.h
67506@@ -22,7 +22,7 @@ struct nameidata {
67507 unsigned int flags;
67508 int last_type;
67509 unsigned depth;
67510- char *saved_names[MAX_NESTED_LINKS + 1];
67511+ const char *saved_names[MAX_NESTED_LINKS + 1];
67512
67513 /* Intent data */
67514 union {
67515@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
67516 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
67517 extern void unlock_rename(struct dentry *, struct dentry *);
67518
67519-static inline void nd_set_link(struct nameidata *nd, char *path)
67520+static inline void nd_set_link(struct nameidata *nd, const char *path)
67521 {
67522 nd->saved_names[nd->depth] = path;
67523 }
67524
67525-static inline char *nd_get_link(struct nameidata *nd)
67526+static inline const char *nd_get_link(const struct nameidata *nd)
67527 {
67528 return nd->saved_names[nd->depth];
67529 }
67530diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
67531index 9d7e8f7..04428c5 100644
67532--- a/include/linux/netdevice.h
67533+++ b/include/linux/netdevice.h
67534@@ -637,6 +637,7 @@ struct net_device_ops {
67535 u16 xid);
67536 #endif
67537 };
67538+typedef struct net_device_ops __no_const net_device_ops_no_const;
67539
67540 /*
67541 * The DEVICE structure.
67542diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
67543new file mode 100644
67544index 0000000..33f4af8
67545--- /dev/null
67546+++ b/include/linux/netfilter/xt_gradm.h
67547@@ -0,0 +1,9 @@
67548+#ifndef _LINUX_NETFILTER_XT_GRADM_H
67549+#define _LINUX_NETFILTER_XT_GRADM_H 1
67550+
67551+struct xt_gradm_mtinfo {
67552+ __u16 flags;
67553+ __u16 invflags;
67554+};
67555+
67556+#endif
67557diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
67558index b359c4a..c08b334 100644
67559--- a/include/linux/nodemask.h
67560+++ b/include/linux/nodemask.h
67561@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
67562
67563 #define any_online_node(mask) \
67564 ({ \
67565- int node; \
67566- for_each_node_mask(node, (mask)) \
67567- if (node_online(node)) \
67568+ int __node; \
67569+ for_each_node_mask(__node, (mask)) \
67570+ if (node_online(__node)) \
67571 break; \
67572- node; \
67573+ __node; \
67574 })
67575
67576 #define num_online_nodes() num_node_state(N_ONLINE)
67577diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
67578index 5171639..7cf4235 100644
67579--- a/include/linux/oprofile.h
67580+++ b/include/linux/oprofile.h
67581@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
67582 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
67583 char const * name, ulong * val);
67584
67585-/** Create a file for read-only access to an atomic_t. */
67586+/** Create a file for read-only access to an atomic_unchecked_t. */
67587 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
67588- char const * name, atomic_t * val);
67589+ char const * name, atomic_unchecked_t * val);
67590
67591 /** create a directory */
67592 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
67593diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
67594index 3c62ed4..8924c7c 100644
67595--- a/include/linux/pagemap.h
67596+++ b/include/linux/pagemap.h
67597@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
67598 if (((unsigned long)uaddr & PAGE_MASK) !=
67599 ((unsigned long)end & PAGE_MASK))
67600 ret = __get_user(c, end);
67601+ (void)c;
67602 }
67603+ (void)c;
67604 return ret;
67605 }
67606
67607diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
67608index 81c9689..a567a55 100644
67609--- a/include/linux/perf_event.h
67610+++ b/include/linux/perf_event.h
67611@@ -476,7 +476,7 @@ struct hw_perf_event {
67612 struct hrtimer hrtimer;
67613 };
67614 };
67615- atomic64_t prev_count;
67616+ atomic64_unchecked_t prev_count;
67617 u64 sample_period;
67618 u64 last_period;
67619 atomic64_t period_left;
67620@@ -557,7 +557,7 @@ struct perf_event {
67621 const struct pmu *pmu;
67622
67623 enum perf_event_active_state state;
67624- atomic64_t count;
67625+ atomic64_unchecked_t count;
67626
67627 /*
67628 * These are the total time in nanoseconds that the event
67629@@ -595,8 +595,8 @@ struct perf_event {
67630 * These accumulate total time (in nanoseconds) that children
67631 * events have been enabled and running, respectively.
67632 */
67633- atomic64_t child_total_time_enabled;
67634- atomic64_t child_total_time_running;
67635+ atomic64_unchecked_t child_total_time_enabled;
67636+ atomic64_unchecked_t child_total_time_running;
67637
67638 /*
67639 * Protect attach/detach and child_list:
67640diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
67641index b43a9e0..b77d869 100644
67642--- a/include/linux/pipe_fs_i.h
67643+++ b/include/linux/pipe_fs_i.h
67644@@ -46,9 +46,9 @@ struct pipe_inode_info {
67645 wait_queue_head_t wait;
67646 unsigned int nrbufs, curbuf;
67647 struct page *tmp_page;
67648- unsigned int readers;
67649- unsigned int writers;
67650- unsigned int waiting_writers;
67651+ atomic_t readers;
67652+ atomic_t writers;
67653+ atomic_t waiting_writers;
67654 unsigned int r_counter;
67655 unsigned int w_counter;
67656 struct fasync_struct *fasync_readers;
67657diff --git a/include/linux/poison.h b/include/linux/poison.h
67658index 34066ff..e95d744 100644
67659--- a/include/linux/poison.h
67660+++ b/include/linux/poison.h
67661@@ -19,8 +19,8 @@
67662 * under normal circumstances, used to verify that nobody uses
67663 * non-initialized list entries.
67664 */
67665-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
67666-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
67667+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
67668+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
67669
67670 /********** include/linux/timer.h **********/
67671 /*
67672diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
67673index 4f71bf4..77ffa64 100644
67674--- a/include/linux/posix-timers.h
67675+++ b/include/linux/posix-timers.h
67676@@ -67,7 +67,7 @@ struct k_itimer {
67677 };
67678
67679 struct k_clock {
67680- int res; /* in nanoseconds */
67681+ const int res; /* in nanoseconds */
67682 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
67683 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
67684 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
67685diff --git a/include/linux/preempt.h b/include/linux/preempt.h
67686index 72b1a10..13303a9 100644
67687--- a/include/linux/preempt.h
67688+++ b/include/linux/preempt.h
67689@@ -110,7 +110,7 @@ struct preempt_ops {
67690 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
67691 void (*sched_out)(struct preempt_notifier *notifier,
67692 struct task_struct *next);
67693-};
67694+} __no_const;
67695
67696 /**
67697 * preempt_notifier - key for installing preemption notifiers
67698diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
67699index 379eaed..1bf73e3 100644
67700--- a/include/linux/proc_fs.h
67701+++ b/include/linux/proc_fs.h
67702@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
67703 return proc_create_data(name, mode, parent, proc_fops, NULL);
67704 }
67705
67706+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
67707+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
67708+{
67709+#ifdef CONFIG_GRKERNSEC_PROC_USER
67710+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
67711+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67712+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
67713+#else
67714+ return proc_create_data(name, mode, parent, proc_fops, NULL);
67715+#endif
67716+}
67717+
67718+
67719 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
67720 mode_t mode, struct proc_dir_entry *base,
67721 read_proc_t *read_proc, void * data)
67722@@ -256,7 +269,7 @@ union proc_op {
67723 int (*proc_show)(struct seq_file *m,
67724 struct pid_namespace *ns, struct pid *pid,
67725 struct task_struct *task);
67726-};
67727+} __no_const;
67728
67729 struct ctl_table_header;
67730 struct ctl_table;
67731diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
67732index 7456d7d..6c1cfc9 100644
67733--- a/include/linux/ptrace.h
67734+++ b/include/linux/ptrace.h
67735@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
67736 extern void exit_ptrace(struct task_struct *tracer);
67737 #define PTRACE_MODE_READ 1
67738 #define PTRACE_MODE_ATTACH 2
67739-/* Returns 0 on success, -errno on denial. */
67740-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
67741 /* Returns true on success, false on denial. */
67742 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
67743+/* Returns true on success, false on denial. */
67744+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
67745
67746 static inline int ptrace_reparented(struct task_struct *child)
67747 {
67748diff --git a/include/linux/random.h b/include/linux/random.h
67749index 2948046..3262567 100644
67750--- a/include/linux/random.h
67751+++ b/include/linux/random.h
67752@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
67753 u32 random32(void);
67754 void srandom32(u32 seed);
67755
67756+static inline unsigned long pax_get_random_long(void)
67757+{
67758+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
67759+}
67760+
67761 #endif /* __KERNEL___ */
67762
67763 #endif /* _LINUX_RANDOM_H */
67764diff --git a/include/linux/reboot.h b/include/linux/reboot.h
67765index 988e55f..17cb4ef 100644
67766--- a/include/linux/reboot.h
67767+++ b/include/linux/reboot.h
67768@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
67769 * Architecture-specific implementations of sys_reboot commands.
67770 */
67771
67772-extern void machine_restart(char *cmd);
67773-extern void machine_halt(void);
67774-extern void machine_power_off(void);
67775+extern void machine_restart(char *cmd) __noreturn;
67776+extern void machine_halt(void) __noreturn;
67777+extern void machine_power_off(void) __noreturn;
67778
67779 extern void machine_shutdown(void);
67780 struct pt_regs;
67781@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
67782 */
67783
67784 extern void kernel_restart_prepare(char *cmd);
67785-extern void kernel_restart(char *cmd);
67786-extern void kernel_halt(void);
67787-extern void kernel_power_off(void);
67788+extern void kernel_restart(char *cmd) __noreturn;
67789+extern void kernel_halt(void) __noreturn;
67790+extern void kernel_power_off(void) __noreturn;
67791
67792 void ctrl_alt_del(void);
67793
67794@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
67795 * Emergency restart, callable from an interrupt handler.
67796 */
67797
67798-extern void emergency_restart(void);
67799+extern void emergency_restart(void) __noreturn;
67800 #include <asm/emergency-restart.h>
67801
67802 #endif
67803diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
67804index dd31e7b..5b03c5c 100644
67805--- a/include/linux/reiserfs_fs.h
67806+++ b/include/linux/reiserfs_fs.h
67807@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67808 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
67809
67810 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67811-#define get_generation(s) atomic_read (&fs_generation(s))
67812+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67813 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67814 #define __fs_changed(gen,s) (gen != get_generation (s))
67815 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
67816@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
67817 */
67818
67819 struct item_operations {
67820- int (*bytes_number) (struct item_head * ih, int block_size);
67821- void (*decrement_key) (struct cpu_key *);
67822- int (*is_left_mergeable) (struct reiserfs_key * ih,
67823+ int (* const bytes_number) (struct item_head * ih, int block_size);
67824+ void (* const decrement_key) (struct cpu_key *);
67825+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
67826 unsigned long bsize);
67827- void (*print_item) (struct item_head *, char *item);
67828- void (*check_item) (struct item_head *, char *item);
67829+ void (* const print_item) (struct item_head *, char *item);
67830+ void (* const check_item) (struct item_head *, char *item);
67831
67832- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
67833+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
67834 int is_affected, int insert_size);
67835- int (*check_left) (struct virtual_item * vi, int free,
67836+ int (* const check_left) (struct virtual_item * vi, int free,
67837 int start_skip, int end_skip);
67838- int (*check_right) (struct virtual_item * vi, int free);
67839- int (*part_size) (struct virtual_item * vi, int from, int to);
67840- int (*unit_num) (struct virtual_item * vi);
67841- void (*print_vi) (struct virtual_item * vi);
67842+ int (* const check_right) (struct virtual_item * vi, int free);
67843+ int (* const part_size) (struct virtual_item * vi, int from, int to);
67844+ int (* const unit_num) (struct virtual_item * vi);
67845+ void (* const print_vi) (struct virtual_item * vi);
67846 };
67847
67848-extern struct item_operations *item_ops[TYPE_ANY + 1];
67849+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
67850
67851 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
67852 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
67853diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
67854index dab68bb..0688727 100644
67855--- a/include/linux/reiserfs_fs_sb.h
67856+++ b/include/linux/reiserfs_fs_sb.h
67857@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
67858 /* Comment? -Hans */
67859 wait_queue_head_t s_wait;
67860 /* To be obsoleted soon by per buffer seals.. -Hans */
67861- atomic_t s_generation_counter; // increased by one every time the
67862+ atomic_unchecked_t s_generation_counter; // increased by one every time the
67863 // tree gets re-balanced
67864 unsigned long s_properties; /* File system properties. Currently holds
67865 on-disk FS format */
67866diff --git a/include/linux/relay.h b/include/linux/relay.h
67867index 14a86bc..17d0700 100644
67868--- a/include/linux/relay.h
67869+++ b/include/linux/relay.h
67870@@ -159,7 +159,7 @@ struct rchan_callbacks
67871 * The callback should return 0 if successful, negative if not.
67872 */
67873 int (*remove_buf_file)(struct dentry *dentry);
67874-};
67875+} __no_const;
67876
67877 /*
67878 * CONFIG_RELAY kernel API, kernel/relay.c
67879diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
67880index 3392c59..a746428 100644
67881--- a/include/linux/rfkill.h
67882+++ b/include/linux/rfkill.h
67883@@ -144,6 +144,7 @@ struct rfkill_ops {
67884 void (*query)(struct rfkill *rfkill, void *data);
67885 int (*set_block)(void *data, bool blocked);
67886 };
67887+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
67888
67889 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
67890 /**
67891diff --git a/include/linux/sched.h b/include/linux/sched.h
67892index 71849bf..40217dc 100644
67893--- a/include/linux/sched.h
67894+++ b/include/linux/sched.h
67895@@ -101,6 +101,7 @@ struct bio;
67896 struct fs_struct;
67897 struct bts_context;
67898 struct perf_event_context;
67899+struct linux_binprm;
67900
67901 /*
67902 * List of flags we want to share for kernel threads,
67903@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
67904 extern signed long schedule_timeout_uninterruptible(signed long timeout);
67905 asmlinkage void __schedule(void);
67906 asmlinkage void schedule(void);
67907-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
67908+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
67909
67910 struct nsproxy;
67911 struct user_namespace;
67912@@ -371,9 +372,12 @@ struct user_namespace;
67913 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
67914
67915 extern int sysctl_max_map_count;
67916+extern unsigned long sysctl_heap_stack_gap;
67917
67918 #include <linux/aio.h>
67919
67920+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
67921+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
67922 extern unsigned long
67923 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
67924 unsigned long, unsigned long);
67925@@ -666,6 +670,16 @@ struct signal_struct {
67926 struct tty_audit_buf *tty_audit_buf;
67927 #endif
67928
67929+#ifdef CONFIG_GRKERNSEC
67930+ u32 curr_ip;
67931+ u32 saved_ip;
67932+ u32 gr_saddr;
67933+ u32 gr_daddr;
67934+ u16 gr_sport;
67935+ u16 gr_dport;
67936+ u8 used_accept:1;
67937+#endif
67938+
67939 int oom_adj; /* OOM kill score adjustment (bit shift) */
67940 };
67941
67942@@ -723,6 +737,11 @@ struct user_struct {
67943 struct key *session_keyring; /* UID's default session keyring */
67944 #endif
67945
67946+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
67947+ unsigned int banned;
67948+ unsigned long ban_expires;
67949+#endif
67950+
67951 /* Hash table maintenance information */
67952 struct hlist_node uidhash_node;
67953 uid_t uid;
67954@@ -1328,8 +1347,8 @@ struct task_struct {
67955 struct list_head thread_group;
67956
67957 struct completion *vfork_done; /* for vfork() */
67958- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
67959- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
67960+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
67961+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
67962
67963 cputime_t utime, stime, utimescaled, stimescaled;
67964 cputime_t gtime;
67965@@ -1343,16 +1362,6 @@ struct task_struct {
67966 struct task_cputime cputime_expires;
67967 struct list_head cpu_timers[3];
67968
67969-/* process credentials */
67970- const struct cred *real_cred; /* objective and real subjective task
67971- * credentials (COW) */
67972- const struct cred *cred; /* effective (overridable) subjective task
67973- * credentials (COW) */
67974- struct mutex cred_guard_mutex; /* guard against foreign influences on
67975- * credential calculations
67976- * (notably. ptrace) */
67977- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
67978-
67979 char comm[TASK_COMM_LEN]; /* executable name excluding path
67980 - access with [gs]et_task_comm (which lock
67981 it with task_lock())
67982@@ -1369,6 +1378,10 @@ struct task_struct {
67983 #endif
67984 /* CPU-specific state of this task */
67985 struct thread_struct thread;
67986+/* thread_info moved to task_struct */
67987+#ifdef CONFIG_X86
67988+ struct thread_info tinfo;
67989+#endif
67990 /* filesystem information */
67991 struct fs_struct *fs;
67992 /* open file information */
67993@@ -1436,6 +1449,15 @@ struct task_struct {
67994 int hardirq_context;
67995 int softirq_context;
67996 #endif
67997+
67998+/* process credentials */
67999+ const struct cred *real_cred; /* objective and real subjective task
68000+ * credentials (COW) */
68001+ struct mutex cred_guard_mutex; /* guard against foreign influences on
68002+ * credential calculations
68003+ * (notably. ptrace) */
68004+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68005+
68006 #ifdef CONFIG_LOCKDEP
68007 # define MAX_LOCK_DEPTH 48UL
68008 u64 curr_chain_key;
68009@@ -1456,6 +1478,9 @@ struct task_struct {
68010
68011 struct backing_dev_info *backing_dev_info;
68012
68013+ const struct cred *cred; /* effective (overridable) subjective task
68014+ * credentials (COW) */
68015+
68016 struct io_context *io_context;
68017
68018 unsigned long ptrace_message;
68019@@ -1519,6 +1544,21 @@ struct task_struct {
68020 unsigned long default_timer_slack_ns;
68021
68022 struct list_head *scm_work_list;
68023+
68024+#ifdef CONFIG_GRKERNSEC
68025+ /* grsecurity */
68026+ struct dentry *gr_chroot_dentry;
68027+ struct acl_subject_label *acl;
68028+ struct acl_role_label *role;
68029+ struct file *exec_file;
68030+ u16 acl_role_id;
68031+ /* is this the task that authenticated to the special role */
68032+ u8 acl_sp_role;
68033+ u8 is_writable;
68034+ u8 brute;
68035+ u8 gr_is_chrooted;
68036+#endif
68037+
68038 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
68039 /* Index of current stored adress in ret_stack */
68040 int curr_ret_stack;
68041@@ -1542,6 +1582,57 @@ struct task_struct {
68042 #endif /* CONFIG_TRACING */
68043 };
68044
68045+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
68046+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
68047+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
68048+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
68049+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
68050+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
68051+
68052+#ifdef CONFIG_PAX_SOFTMODE
68053+extern int pax_softmode;
68054+#endif
68055+
68056+extern int pax_check_flags(unsigned long *);
68057+
68058+/* if tsk != current then task_lock must be held on it */
68059+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68060+static inline unsigned long pax_get_flags(struct task_struct *tsk)
68061+{
68062+ if (likely(tsk->mm))
68063+ return tsk->mm->pax_flags;
68064+ else
68065+ return 0UL;
68066+}
68067+
68068+/* if tsk != current then task_lock must be held on it */
68069+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
68070+{
68071+ if (likely(tsk->mm)) {
68072+ tsk->mm->pax_flags = flags;
68073+ return 0;
68074+ }
68075+ return -EINVAL;
68076+}
68077+#endif
68078+
68079+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68080+extern void pax_set_initial_flags(struct linux_binprm *bprm);
68081+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
68082+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68083+#endif
68084+
68085+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
68086+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
68087+extern void pax_report_refcount_overflow(struct pt_regs *regs);
68088+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
68089+
68090+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
68091+extern void pax_track_stack(void);
68092+#else
68093+static inline void pax_track_stack(void) {}
68094+#endif
68095+
68096 /* Future-safe accessor for struct task_struct's cpus_allowed. */
68097 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
68098
68099@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
68100 #define PF_DUMPCORE 0x00000200 /* dumped core */
68101 #define PF_SIGNALED 0x00000400 /* killed by a signal */
68102 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
68103-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
68104+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
68105 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
68106 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
68107 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
68108@@ -1978,7 +2069,9 @@ void yield(void);
68109 extern struct exec_domain default_exec_domain;
68110
68111 union thread_union {
68112+#ifndef CONFIG_X86
68113 struct thread_info thread_info;
68114+#endif
68115 unsigned long stack[THREAD_SIZE/sizeof(long)];
68116 };
68117
68118@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
68119 */
68120
68121 extern struct task_struct *find_task_by_vpid(pid_t nr);
68122+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
68123 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
68124 struct pid_namespace *ns);
68125
68126@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
68127 extern void exit_itimers(struct signal_struct *);
68128 extern void flush_itimer_signals(void);
68129
68130-extern NORET_TYPE void do_group_exit(int);
68131+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
68132
68133 extern void daemonize(const char *, ...);
68134 extern int allow_signal(int);
68135@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
68136
68137 #endif
68138
68139-static inline int object_is_on_stack(void *obj)
68140+static inline int object_starts_on_stack(void *obj)
68141 {
68142- void *stack = task_stack_page(current);
68143+ const void *stack = task_stack_page(current);
68144
68145 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
68146 }
68147
68148+#ifdef CONFIG_PAX_USERCOPY
68149+extern int object_is_on_stack(const void *obj, unsigned long len);
68150+#endif
68151+
68152 extern void thread_info_cache_init(void);
68153
68154 #ifdef CONFIG_DEBUG_STACK_USAGE
68155diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
68156index 1ee2c05..81b7ec4 100644
68157--- a/include/linux/screen_info.h
68158+++ b/include/linux/screen_info.h
68159@@ -42,7 +42,8 @@ struct screen_info {
68160 __u16 pages; /* 0x32 */
68161 __u16 vesa_attributes; /* 0x34 */
68162 __u32 capabilities; /* 0x36 */
68163- __u8 _reserved[6]; /* 0x3a */
68164+ __u16 vesapm_size; /* 0x3a */
68165+ __u8 _reserved[4]; /* 0x3c */
68166 } __attribute__((packed));
68167
68168 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
68169diff --git a/include/linux/security.h b/include/linux/security.h
68170index d40d23f..253bd14 100644
68171--- a/include/linux/security.h
68172+++ b/include/linux/security.h
68173@@ -34,6 +34,7 @@
68174 #include <linux/key.h>
68175 #include <linux/xfrm.h>
68176 #include <linux/gfp.h>
68177+#include <linux/grsecurity.h>
68178 #include <net/flow.h>
68179
68180 /* Maximum number of letters for an LSM name string */
68181diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
68182index 8366d8f..2307490 100644
68183--- a/include/linux/seq_file.h
68184+++ b/include/linux/seq_file.h
68185@@ -32,6 +32,7 @@ struct seq_operations {
68186 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
68187 int (*show) (struct seq_file *m, void *v);
68188 };
68189+typedef struct seq_operations __no_const seq_operations_no_const;
68190
68191 #define SEQ_SKIP 1
68192
68193diff --git a/include/linux/shm.h b/include/linux/shm.h
68194index eca6235..c7417ed 100644
68195--- a/include/linux/shm.h
68196+++ b/include/linux/shm.h
68197@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
68198 pid_t shm_cprid;
68199 pid_t shm_lprid;
68200 struct user_struct *mlock_user;
68201+#ifdef CONFIG_GRKERNSEC
68202+ time_t shm_createtime;
68203+ pid_t shm_lapid;
68204+#endif
68205 };
68206
68207 /* shm_mode upper byte flags */
68208diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
68209index bcdd660..6e12e11 100644
68210--- a/include/linux/skbuff.h
68211+++ b/include/linux/skbuff.h
68212@@ -14,6 +14,7 @@
68213 #ifndef _LINUX_SKBUFF_H
68214 #define _LINUX_SKBUFF_H
68215
68216+#include <linux/const.h>
68217 #include <linux/kernel.h>
68218 #include <linux/kmemcheck.h>
68219 #include <linux/compiler.h>
68220@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
68221 */
68222 static inline int skb_queue_empty(const struct sk_buff_head *list)
68223 {
68224- return list->next == (struct sk_buff *)list;
68225+ return list->next == (const struct sk_buff *)list;
68226 }
68227
68228 /**
68229@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
68230 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68231 const struct sk_buff *skb)
68232 {
68233- return (skb->next == (struct sk_buff *) list);
68234+ return (skb->next == (const struct sk_buff *) list);
68235 }
68236
68237 /**
68238@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68239 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
68240 const struct sk_buff *skb)
68241 {
68242- return (skb->prev == (struct sk_buff *) list);
68243+ return (skb->prev == (const struct sk_buff *) list);
68244 }
68245
68246 /**
68247@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
68248 * headroom, you should not reduce this.
68249 */
68250 #ifndef NET_SKB_PAD
68251-#define NET_SKB_PAD 32
68252+#define NET_SKB_PAD (_AC(32,UL))
68253 #endif
68254
68255 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
68256diff --git a/include/linux/slab.h b/include/linux/slab.h
68257index 2da8372..a3be824 100644
68258--- a/include/linux/slab.h
68259+++ b/include/linux/slab.h
68260@@ -11,12 +11,20 @@
68261
68262 #include <linux/gfp.h>
68263 #include <linux/types.h>
68264+#include <linux/err.h>
68265
68266 /*
68267 * Flags to pass to kmem_cache_create().
68268 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
68269 */
68270 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
68271+
68272+#ifdef CONFIG_PAX_USERCOPY
68273+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
68274+#else
68275+#define SLAB_USERCOPY 0x00000000UL
68276+#endif
68277+
68278 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
68279 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
68280 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
68281@@ -82,10 +90,13 @@
68282 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
68283 * Both make kfree a no-op.
68284 */
68285-#define ZERO_SIZE_PTR ((void *)16)
68286+#define ZERO_SIZE_PTR \
68287+({ \
68288+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
68289+ (void *)(-MAX_ERRNO-1L); \
68290+})
68291
68292-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
68293- (unsigned long)ZERO_SIZE_PTR)
68294+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
68295
68296 /*
68297 * struct kmem_cache related prototypes
68298@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
68299 void kfree(const void *);
68300 void kzfree(const void *);
68301 size_t ksize(const void *);
68302+void check_object_size(const void *ptr, unsigned long n, bool to);
68303
68304 /*
68305 * Allocator specific definitions. These are mainly used to establish optimized
68306@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
68307
68308 void __init kmem_cache_init_late(void);
68309
68310+#define kmalloc(x, y) \
68311+({ \
68312+ void *___retval; \
68313+ intoverflow_t ___x = (intoverflow_t)x; \
68314+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
68315+ ___retval = NULL; \
68316+ else \
68317+ ___retval = kmalloc((size_t)___x, (y)); \
68318+ ___retval; \
68319+})
68320+
68321+#define kmalloc_node(x, y, z) \
68322+({ \
68323+ void *___retval; \
68324+ intoverflow_t ___x = (intoverflow_t)x; \
68325+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
68326+ ___retval = NULL; \
68327+ else \
68328+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
68329+ ___retval; \
68330+})
68331+
68332+#define kzalloc(x, y) \
68333+({ \
68334+ void *___retval; \
68335+ intoverflow_t ___x = (intoverflow_t)x; \
68336+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
68337+ ___retval = NULL; \
68338+ else \
68339+ ___retval = kzalloc((size_t)___x, (y)); \
68340+ ___retval; \
68341+})
68342+
68343 #endif /* _LINUX_SLAB_H */
68344diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
68345index 850d057..d9dfe3c 100644
68346--- a/include/linux/slab_def.h
68347+++ b/include/linux/slab_def.h
68348@@ -69,10 +69,10 @@ struct kmem_cache {
68349 unsigned long node_allocs;
68350 unsigned long node_frees;
68351 unsigned long node_overflow;
68352- atomic_t allochit;
68353- atomic_t allocmiss;
68354- atomic_t freehit;
68355- atomic_t freemiss;
68356+ atomic_unchecked_t allochit;
68357+ atomic_unchecked_t allocmiss;
68358+ atomic_unchecked_t freehit;
68359+ atomic_unchecked_t freemiss;
68360
68361 /*
68362 * If debugging is enabled, then the allocator can add additional
68363diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
68364index 5ad70a6..57f9f65 100644
68365--- a/include/linux/slub_def.h
68366+++ b/include/linux/slub_def.h
68367@@ -86,7 +86,7 @@ struct kmem_cache {
68368 struct kmem_cache_order_objects max;
68369 struct kmem_cache_order_objects min;
68370 gfp_t allocflags; /* gfp flags to use on each alloc */
68371- int refcount; /* Refcount for slab cache destroy */
68372+ atomic_t refcount; /* Refcount for slab cache destroy */
68373 void (*ctor)(void *);
68374 int inuse; /* Offset to metadata */
68375 int align; /* Alignment */
68376@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
68377 #endif
68378
68379 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
68380-void *__kmalloc(size_t size, gfp_t flags);
68381+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
68382
68383 #ifdef CONFIG_KMEMTRACE
68384 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
68385diff --git a/include/linux/sonet.h b/include/linux/sonet.h
68386index 67ad11f..0bbd8af 100644
68387--- a/include/linux/sonet.h
68388+++ b/include/linux/sonet.h
68389@@ -61,7 +61,7 @@ struct sonet_stats {
68390 #include <asm/atomic.h>
68391
68392 struct k_sonet_stats {
68393-#define __HANDLE_ITEM(i) atomic_t i
68394+#define __HANDLE_ITEM(i) atomic_unchecked_t i
68395 __SONET_ITEMS
68396 #undef __HANDLE_ITEM
68397 };
68398diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
68399index 6f52b4d..5500323 100644
68400--- a/include/linux/sunrpc/cache.h
68401+++ b/include/linux/sunrpc/cache.h
68402@@ -125,7 +125,7 @@ struct cache_detail {
68403 */
68404 struct cache_req {
68405 struct cache_deferred_req *(*defer)(struct cache_req *req);
68406-};
68407+} __no_const;
68408 /* this must be embedded in a deferred_request that is being
68409 * delayed awaiting cache-fill
68410 */
68411diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
68412index 8ed9642..101ceab 100644
68413--- a/include/linux/sunrpc/clnt.h
68414+++ b/include/linux/sunrpc/clnt.h
68415@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
68416 {
68417 switch (sap->sa_family) {
68418 case AF_INET:
68419- return ntohs(((struct sockaddr_in *)sap)->sin_port);
68420+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
68421 case AF_INET6:
68422- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
68423+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
68424 }
68425 return 0;
68426 }
68427@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
68428 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
68429 const struct sockaddr *src)
68430 {
68431- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
68432+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
68433 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
68434
68435 dsin->sin_family = ssin->sin_family;
68436@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
68437 if (sa->sa_family != AF_INET6)
68438 return 0;
68439
68440- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
68441+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
68442 }
68443
68444 #endif /* __KERNEL__ */
68445diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
68446index c14fe86..393245e 100644
68447--- a/include/linux/sunrpc/svc_rdma.h
68448+++ b/include/linux/sunrpc/svc_rdma.h
68449@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
68450 extern unsigned int svcrdma_max_requests;
68451 extern unsigned int svcrdma_max_req_size;
68452
68453-extern atomic_t rdma_stat_recv;
68454-extern atomic_t rdma_stat_read;
68455-extern atomic_t rdma_stat_write;
68456-extern atomic_t rdma_stat_sq_starve;
68457-extern atomic_t rdma_stat_rq_starve;
68458-extern atomic_t rdma_stat_rq_poll;
68459-extern atomic_t rdma_stat_rq_prod;
68460-extern atomic_t rdma_stat_sq_poll;
68461-extern atomic_t rdma_stat_sq_prod;
68462+extern atomic_unchecked_t rdma_stat_recv;
68463+extern atomic_unchecked_t rdma_stat_read;
68464+extern atomic_unchecked_t rdma_stat_write;
68465+extern atomic_unchecked_t rdma_stat_sq_starve;
68466+extern atomic_unchecked_t rdma_stat_rq_starve;
68467+extern atomic_unchecked_t rdma_stat_rq_poll;
68468+extern atomic_unchecked_t rdma_stat_rq_prod;
68469+extern atomic_unchecked_t rdma_stat_sq_poll;
68470+extern atomic_unchecked_t rdma_stat_sq_prod;
68471
68472 #define RPCRDMA_VERSION 1
68473
68474diff --git a/include/linux/suspend.h b/include/linux/suspend.h
68475index 5e781d8..1e62818 100644
68476--- a/include/linux/suspend.h
68477+++ b/include/linux/suspend.h
68478@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
68479 * which require special recovery actions in that situation.
68480 */
68481 struct platform_suspend_ops {
68482- int (*valid)(suspend_state_t state);
68483- int (*begin)(suspend_state_t state);
68484- int (*prepare)(void);
68485- int (*prepare_late)(void);
68486- int (*enter)(suspend_state_t state);
68487- void (*wake)(void);
68488- void (*finish)(void);
68489- void (*end)(void);
68490- void (*recover)(void);
68491+ int (* const valid)(suspend_state_t state);
68492+ int (* const begin)(suspend_state_t state);
68493+ int (* const prepare)(void);
68494+ int (* const prepare_late)(void);
68495+ int (* const enter)(suspend_state_t state);
68496+ void (* const wake)(void);
68497+ void (* const finish)(void);
68498+ void (* const end)(void);
68499+ void (* const recover)(void);
68500 };
68501
68502 #ifdef CONFIG_SUSPEND
68503@@ -120,7 +120,7 @@ struct platform_suspend_ops {
68504 * suspend_set_ops - set platform dependent suspend operations
68505 * @ops: The new suspend operations to set.
68506 */
68507-extern void suspend_set_ops(struct platform_suspend_ops *ops);
68508+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
68509 extern int suspend_valid_only_mem(suspend_state_t state);
68510
68511 /**
68512@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
68513 #else /* !CONFIG_SUSPEND */
68514 #define suspend_valid_only_mem NULL
68515
68516-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
68517+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
68518 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
68519 #endif /* !CONFIG_SUSPEND */
68520
68521@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
68522 * platforms which require special recovery actions in that situation.
68523 */
68524 struct platform_hibernation_ops {
68525- int (*begin)(void);
68526- void (*end)(void);
68527- int (*pre_snapshot)(void);
68528- void (*finish)(void);
68529- int (*prepare)(void);
68530- int (*enter)(void);
68531- void (*leave)(void);
68532- int (*pre_restore)(void);
68533- void (*restore_cleanup)(void);
68534- void (*recover)(void);
68535+ int (* const begin)(void);
68536+ void (* const end)(void);
68537+ int (* const pre_snapshot)(void);
68538+ void (* const finish)(void);
68539+ int (* const prepare)(void);
68540+ int (* const enter)(void);
68541+ void (* const leave)(void);
68542+ int (* const pre_restore)(void);
68543+ void (* const restore_cleanup)(void);
68544+ void (* const recover)(void);
68545 };
68546
68547 #ifdef CONFIG_HIBERNATION
68548@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
68549 extern void swsusp_unset_page_free(struct page *);
68550 extern unsigned long get_safe_page(gfp_t gfp_mask);
68551
68552-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
68553+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
68554 extern int hibernate(void);
68555 extern bool system_entering_hibernation(void);
68556 #else /* CONFIG_HIBERNATION */
68557@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
68558 static inline void swsusp_set_page_free(struct page *p) {}
68559 static inline void swsusp_unset_page_free(struct page *p) {}
68560
68561-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
68562+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
68563 static inline int hibernate(void) { return -ENOSYS; }
68564 static inline bool system_entering_hibernation(void) { return false; }
68565 #endif /* CONFIG_HIBERNATION */
68566diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
68567index 0eb6942..a805cb6 100644
68568--- a/include/linux/sysctl.h
68569+++ b/include/linux/sysctl.h
68570@@ -164,7 +164,11 @@ enum
68571 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
68572 };
68573
68574-
68575+#ifdef CONFIG_PAX_SOFTMODE
68576+enum {
68577+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
68578+};
68579+#endif
68580
68581 /* CTL_VM names: */
68582 enum
68583@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
68584
68585 extern int proc_dostring(struct ctl_table *, int,
68586 void __user *, size_t *, loff_t *);
68587+extern int proc_dostring_modpriv(struct ctl_table *, int,
68588+ void __user *, size_t *, loff_t *);
68589 extern int proc_dointvec(struct ctl_table *, int,
68590 void __user *, size_t *, loff_t *);
68591 extern int proc_dointvec_minmax(struct ctl_table *, int,
68592@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
68593
68594 extern ctl_handler sysctl_data;
68595 extern ctl_handler sysctl_string;
68596+extern ctl_handler sysctl_string_modpriv;
68597 extern ctl_handler sysctl_intvec;
68598 extern ctl_handler sysctl_jiffies;
68599 extern ctl_handler sysctl_ms_jiffies;
68600diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
68601index 9d68fed..71f02cc 100644
68602--- a/include/linux/sysfs.h
68603+++ b/include/linux/sysfs.h
68604@@ -75,8 +75,8 @@ struct bin_attribute {
68605 };
68606
68607 struct sysfs_ops {
68608- ssize_t (*show)(struct kobject *, struct attribute *,char *);
68609- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
68610+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
68611+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
68612 };
68613
68614 struct sysfs_dirent;
68615diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
68616index a8cc4e1..98d3b85 100644
68617--- a/include/linux/thread_info.h
68618+++ b/include/linux/thread_info.h
68619@@ -23,7 +23,7 @@ struct restart_block {
68620 };
68621 /* For futex_wait and futex_wait_requeue_pi */
68622 struct {
68623- u32 *uaddr;
68624+ u32 __user *uaddr;
68625 u32 val;
68626 u32 flags;
68627 u32 bitset;
68628diff --git a/include/linux/tty.h b/include/linux/tty.h
68629index e9c57e9..ee6d489 100644
68630--- a/include/linux/tty.h
68631+++ b/include/linux/tty.h
68632@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
68633 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
68634 extern void tty_ldisc_enable(struct tty_struct *tty);
68635
68636-
68637 /* n_tty.c */
68638 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
68639
68640diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
68641index 0c4ee9b..9f7c426 100644
68642--- a/include/linux/tty_ldisc.h
68643+++ b/include/linux/tty_ldisc.h
68644@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
68645
68646 struct module *owner;
68647
68648- int refcount;
68649+ atomic_t refcount;
68650 };
68651
68652 struct tty_ldisc {
68653diff --git a/include/linux/types.h b/include/linux/types.h
68654index c42724f..d190eee 100644
68655--- a/include/linux/types.h
68656+++ b/include/linux/types.h
68657@@ -191,10 +191,26 @@ typedef struct {
68658 volatile int counter;
68659 } atomic_t;
68660
68661+#ifdef CONFIG_PAX_REFCOUNT
68662+typedef struct {
68663+ volatile int counter;
68664+} atomic_unchecked_t;
68665+#else
68666+typedef atomic_t atomic_unchecked_t;
68667+#endif
68668+
68669 #ifdef CONFIG_64BIT
68670 typedef struct {
68671 volatile long counter;
68672 } atomic64_t;
68673+
68674+#ifdef CONFIG_PAX_REFCOUNT
68675+typedef struct {
68676+ volatile long counter;
68677+} atomic64_unchecked_t;
68678+#else
68679+typedef atomic64_t atomic64_unchecked_t;
68680+#endif
68681 #endif
68682
68683 struct ustat {
68684diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
68685index 6b58367..53a3e8e 100644
68686--- a/include/linux/uaccess.h
68687+++ b/include/linux/uaccess.h
68688@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
68689 long ret; \
68690 mm_segment_t old_fs = get_fs(); \
68691 \
68692- set_fs(KERNEL_DS); \
68693 pagefault_disable(); \
68694- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
68695- pagefault_enable(); \
68696+ set_fs(KERNEL_DS); \
68697+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
68698 set_fs(old_fs); \
68699+ pagefault_enable(); \
68700 ret; \
68701 })
68702
68703@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
68704 * Safely read from address @src to the buffer at @dst. If a kernel fault
68705 * happens, handle that and return -EFAULT.
68706 */
68707-extern long probe_kernel_read(void *dst, void *src, size_t size);
68708+extern long probe_kernel_read(void *dst, const void *src, size_t size);
68709
68710 /*
68711 * probe_kernel_write(): safely attempt to write to a location
68712@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
68713 * Safely write to address @dst from the buffer at @src. If a kernel fault
68714 * happens, handle that and return -EFAULT.
68715 */
68716-extern long probe_kernel_write(void *dst, void *src, size_t size);
68717+extern long probe_kernel_write(void *dst, const void *src, size_t size);
68718
68719 #endif /* __LINUX_UACCESS_H__ */
68720diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
68721index 99c1b4d..bb94261 100644
68722--- a/include/linux/unaligned/access_ok.h
68723+++ b/include/linux/unaligned/access_ok.h
68724@@ -6,32 +6,32 @@
68725
68726 static inline u16 get_unaligned_le16(const void *p)
68727 {
68728- return le16_to_cpup((__le16 *)p);
68729+ return le16_to_cpup((const __le16 *)p);
68730 }
68731
68732 static inline u32 get_unaligned_le32(const void *p)
68733 {
68734- return le32_to_cpup((__le32 *)p);
68735+ return le32_to_cpup((const __le32 *)p);
68736 }
68737
68738 static inline u64 get_unaligned_le64(const void *p)
68739 {
68740- return le64_to_cpup((__le64 *)p);
68741+ return le64_to_cpup((const __le64 *)p);
68742 }
68743
68744 static inline u16 get_unaligned_be16(const void *p)
68745 {
68746- return be16_to_cpup((__be16 *)p);
68747+ return be16_to_cpup((const __be16 *)p);
68748 }
68749
68750 static inline u32 get_unaligned_be32(const void *p)
68751 {
68752- return be32_to_cpup((__be32 *)p);
68753+ return be32_to_cpup((const __be32 *)p);
68754 }
68755
68756 static inline u64 get_unaligned_be64(const void *p)
68757 {
68758- return be64_to_cpup((__be64 *)p);
68759+ return be64_to_cpup((const __be64 *)p);
68760 }
68761
68762 static inline void put_unaligned_le16(u16 val, void *p)
68763diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
68764index 79b9837..b5a56f9 100644
68765--- a/include/linux/vermagic.h
68766+++ b/include/linux/vermagic.h
68767@@ -26,9 +26,35 @@
68768 #define MODULE_ARCH_VERMAGIC ""
68769 #endif
68770
68771+#ifdef CONFIG_PAX_REFCOUNT
68772+#define MODULE_PAX_REFCOUNT "REFCOUNT "
68773+#else
68774+#define MODULE_PAX_REFCOUNT ""
68775+#endif
68776+
68777+#ifdef CONSTIFY_PLUGIN
68778+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
68779+#else
68780+#define MODULE_CONSTIFY_PLUGIN ""
68781+#endif
68782+
68783+#ifdef STACKLEAK_PLUGIN
68784+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
68785+#else
68786+#define MODULE_STACKLEAK_PLUGIN ""
68787+#endif
68788+
68789+#ifdef CONFIG_GRKERNSEC
68790+#define MODULE_GRSEC "GRSEC "
68791+#else
68792+#define MODULE_GRSEC ""
68793+#endif
68794+
68795 #define VERMAGIC_STRING \
68796 UTS_RELEASE " " \
68797 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
68798 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
68799- MODULE_ARCH_VERMAGIC
68800+ MODULE_ARCH_VERMAGIC \
68801+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
68802+ MODULE_GRSEC
68803
68804diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
68805index 819a634..462ac12 100644
68806--- a/include/linux/vmalloc.h
68807+++ b/include/linux/vmalloc.h
68808@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
68809 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
68810 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
68811 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
68812+
68813+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
68814+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
68815+#endif
68816+
68817 /* bits [20..32] reserved for arch specific ioremap internals */
68818
68819 /*
68820@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
68821
68822 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
68823
68824+#define vmalloc(x) \
68825+({ \
68826+ void *___retval; \
68827+ intoverflow_t ___x = (intoverflow_t)x; \
68828+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
68829+ ___retval = NULL; \
68830+ else \
68831+ ___retval = vmalloc((unsigned long)___x); \
68832+ ___retval; \
68833+})
68834+
68835+#define __vmalloc(x, y, z) \
68836+({ \
68837+ void *___retval; \
68838+ intoverflow_t ___x = (intoverflow_t)x; \
68839+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
68840+ ___retval = NULL; \
68841+ else \
68842+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
68843+ ___retval; \
68844+})
68845+
68846+#define vmalloc_user(x) \
68847+({ \
68848+ void *___retval; \
68849+ intoverflow_t ___x = (intoverflow_t)x; \
68850+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
68851+ ___retval = NULL; \
68852+ else \
68853+ ___retval = vmalloc_user((unsigned long)___x); \
68854+ ___retval; \
68855+})
68856+
68857+#define vmalloc_exec(x) \
68858+({ \
68859+ void *___retval; \
68860+ intoverflow_t ___x = (intoverflow_t)x; \
68861+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
68862+ ___retval = NULL; \
68863+ else \
68864+ ___retval = vmalloc_exec((unsigned long)___x); \
68865+ ___retval; \
68866+})
68867+
68868+#define vmalloc_node(x, y) \
68869+({ \
68870+ void *___retval; \
68871+ intoverflow_t ___x = (intoverflow_t)x; \
68872+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
68873+ ___retval = NULL; \
68874+ else \
68875+ ___retval = vmalloc_node((unsigned long)___x, (y));\
68876+ ___retval; \
68877+})
68878+
68879+#define vmalloc_32(x) \
68880+({ \
68881+ void *___retval; \
68882+ intoverflow_t ___x = (intoverflow_t)x; \
68883+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
68884+ ___retval = NULL; \
68885+ else \
68886+ ___retval = vmalloc_32((unsigned long)___x); \
68887+ ___retval; \
68888+})
68889+
68890+#define vmalloc_32_user(x) \
68891+({ \
68892+ void *___retval; \
68893+ intoverflow_t ___x = (intoverflow_t)x; \
68894+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
68895+ ___retval = NULL; \
68896+ else \
68897+ ___retval = vmalloc_32_user((unsigned long)___x);\
68898+ ___retval; \
68899+})
68900+
68901 #endif /* _LINUX_VMALLOC_H */
68902diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
68903index 13070d6..aa4159a 100644
68904--- a/include/linux/vmstat.h
68905+++ b/include/linux/vmstat.h
68906@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
68907 /*
68908 * Zone based page accounting with per cpu differentials.
68909 */
68910-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68911+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68912
68913 static inline void zone_page_state_add(long x, struct zone *zone,
68914 enum zone_stat_item item)
68915 {
68916- atomic_long_add(x, &zone->vm_stat[item]);
68917- atomic_long_add(x, &vm_stat[item]);
68918+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
68919+ atomic_long_add_unchecked(x, &vm_stat[item]);
68920 }
68921
68922 static inline unsigned long global_page_state(enum zone_stat_item item)
68923 {
68924- long x = atomic_long_read(&vm_stat[item]);
68925+ long x = atomic_long_read_unchecked(&vm_stat[item]);
68926 #ifdef CONFIG_SMP
68927 if (x < 0)
68928 x = 0;
68929@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
68930 static inline unsigned long zone_page_state(struct zone *zone,
68931 enum zone_stat_item item)
68932 {
68933- long x = atomic_long_read(&zone->vm_stat[item]);
68934+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
68935 #ifdef CONFIG_SMP
68936 if (x < 0)
68937 x = 0;
68938@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
68939 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
68940 enum zone_stat_item item)
68941 {
68942- long x = atomic_long_read(&zone->vm_stat[item]);
68943+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
68944
68945 #ifdef CONFIG_SMP
68946 int cpu;
68947@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
68948
68949 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
68950 {
68951- atomic_long_inc(&zone->vm_stat[item]);
68952- atomic_long_inc(&vm_stat[item]);
68953+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
68954+ atomic_long_inc_unchecked(&vm_stat[item]);
68955 }
68956
68957 static inline void __inc_zone_page_state(struct page *page,
68958@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
68959
68960 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
68961 {
68962- atomic_long_dec(&zone->vm_stat[item]);
68963- atomic_long_dec(&vm_stat[item]);
68964+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
68965+ atomic_long_dec_unchecked(&vm_stat[item]);
68966 }
68967
68968 static inline void __dec_zone_page_state(struct page *page,
68969diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
68970index eed5fcc..5080d24 100644
68971--- a/include/media/saa7146_vv.h
68972+++ b/include/media/saa7146_vv.h
68973@@ -167,7 +167,7 @@ struct saa7146_ext_vv
68974 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
68975
68976 /* the extension can override this */
68977- struct v4l2_ioctl_ops ops;
68978+ v4l2_ioctl_ops_no_const ops;
68979 /* pointer to the saa7146 core ops */
68980 const struct v4l2_ioctl_ops *core_ops;
68981
68982diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
68983index 73c9867..2da8837 100644
68984--- a/include/media/v4l2-dev.h
68985+++ b/include/media/v4l2-dev.h
68986@@ -34,7 +34,7 @@ struct v4l2_device;
68987 #define V4L2_FL_UNREGISTERED (0)
68988
68989 struct v4l2_file_operations {
68990- struct module *owner;
68991+ struct module * const owner;
68992 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
68993 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
68994 unsigned int (*poll) (struct file *, struct poll_table_struct *);
68995@@ -46,6 +46,7 @@ struct v4l2_file_operations {
68996 int (*open) (struct file *);
68997 int (*release) (struct file *);
68998 };
68999+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
69000
69001 /*
69002 * Newer version of video_device, handled by videodev2.c
69003diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
69004index 5d5d550..f559ef1 100644
69005--- a/include/media/v4l2-device.h
69006+++ b/include/media/v4l2-device.h
69007@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
69008 this function returns 0. If the name ends with a digit (e.g. cx18),
69009 then the name will be set to cx18-0 since cx180 looks really odd. */
69010 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
69011- atomic_t *instance);
69012+ atomic_unchecked_t *instance);
69013
69014 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
69015 Since the parent disappears this ensures that v4l2_dev doesn't have an
69016diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
69017index 7a4529d..7244290 100644
69018--- a/include/media/v4l2-ioctl.h
69019+++ b/include/media/v4l2-ioctl.h
69020@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
69021 long (*vidioc_default) (struct file *file, void *fh,
69022 int cmd, void *arg);
69023 };
69024+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
69025
69026
69027 /* v4l debugging and diagnostics */
69028diff --git a/include/net/flow.h b/include/net/flow.h
69029index 809970b..c3df4f3 100644
69030--- a/include/net/flow.h
69031+++ b/include/net/flow.h
69032@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
69033 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
69034 u8 dir, flow_resolve_t resolver);
69035 extern void flow_cache_flush(void);
69036-extern atomic_t flow_cache_genid;
69037+extern atomic_unchecked_t flow_cache_genid;
69038
69039 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
69040 {
69041diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
69042index 15e1f8fe..668837c 100644
69043--- a/include/net/inetpeer.h
69044+++ b/include/net/inetpeer.h
69045@@ -24,7 +24,7 @@ struct inet_peer
69046 __u32 dtime; /* the time of last use of not
69047 * referenced entries */
69048 atomic_t refcnt;
69049- atomic_t rid; /* Frag reception counter */
69050+ atomic_unchecked_t rid; /* Frag reception counter */
69051 __u32 tcp_ts;
69052 unsigned long tcp_ts_stamp;
69053 };
69054diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
69055index 98978e7..2243a3d 100644
69056--- a/include/net/ip_vs.h
69057+++ b/include/net/ip_vs.h
69058@@ -365,7 +365,7 @@ struct ip_vs_conn {
69059 struct ip_vs_conn *control; /* Master control connection */
69060 atomic_t n_control; /* Number of controlled ones */
69061 struct ip_vs_dest *dest; /* real server */
69062- atomic_t in_pkts; /* incoming packet counter */
69063+ atomic_unchecked_t in_pkts; /* incoming packet counter */
69064
69065 /* packet transmitter for different forwarding methods. If it
69066 mangles the packet, it must return NF_DROP or better NF_STOLEN,
69067@@ -466,7 +466,7 @@ struct ip_vs_dest {
69068 union nf_inet_addr addr; /* IP address of the server */
69069 __be16 port; /* port number of the server */
69070 volatile unsigned flags; /* dest status flags */
69071- atomic_t conn_flags; /* flags to copy to conn */
69072+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
69073 atomic_t weight; /* server weight */
69074
69075 atomic_t refcnt; /* reference counter */
69076diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
69077index 69b610a..fe3962c 100644
69078--- a/include/net/irda/ircomm_core.h
69079+++ b/include/net/irda/ircomm_core.h
69080@@ -51,7 +51,7 @@ typedef struct {
69081 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
69082 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
69083 struct ircomm_info *);
69084-} call_t;
69085+} __no_const call_t;
69086
69087 struct ircomm_cb {
69088 irda_queue_t queue;
69089diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
69090index eea2e61..08c692d 100644
69091--- a/include/net/irda/ircomm_tty.h
69092+++ b/include/net/irda/ircomm_tty.h
69093@@ -35,6 +35,7 @@
69094 #include <linux/termios.h>
69095 #include <linux/timer.h>
69096 #include <linux/tty.h> /* struct tty_struct */
69097+#include <asm/local.h>
69098
69099 #include <net/irda/irias_object.h>
69100 #include <net/irda/ircomm_core.h>
69101@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
69102 unsigned short close_delay;
69103 unsigned short closing_wait; /* time to wait before closing */
69104
69105- int open_count;
69106- int blocked_open; /* # of blocked opens */
69107+ local_t open_count;
69108+ local_t blocked_open; /* # of blocked opens */
69109
69110 /* Protect concurent access to :
69111 * o self->open_count
69112diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
69113index f82a1e8..82d81e8 100644
69114--- a/include/net/iucv/af_iucv.h
69115+++ b/include/net/iucv/af_iucv.h
69116@@ -87,7 +87,7 @@ struct iucv_sock {
69117 struct iucv_sock_list {
69118 struct hlist_head head;
69119 rwlock_t lock;
69120- atomic_t autobind_name;
69121+ atomic_unchecked_t autobind_name;
69122 };
69123
69124 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
69125diff --git a/include/net/lapb.h b/include/net/lapb.h
69126index 96cb5dd..25e8d4f 100644
69127--- a/include/net/lapb.h
69128+++ b/include/net/lapb.h
69129@@ -95,7 +95,7 @@ struct lapb_cb {
69130 struct sk_buff_head write_queue;
69131 struct sk_buff_head ack_queue;
69132 unsigned char window;
69133- struct lapb_register_struct callbacks;
69134+ struct lapb_register_struct *callbacks;
69135
69136 /* FRMR control information */
69137 struct lapb_frame frmr_data;
69138diff --git a/include/net/neighbour.h b/include/net/neighbour.h
69139index 3817fda..cdb2343 100644
69140--- a/include/net/neighbour.h
69141+++ b/include/net/neighbour.h
69142@@ -131,7 +131,7 @@ struct neigh_ops
69143 int (*connected_output)(struct sk_buff*);
69144 int (*hh_output)(struct sk_buff*);
69145 int (*queue_xmit)(struct sk_buff*);
69146-};
69147+} __do_const;
69148
69149 struct pneigh_entry
69150 {
69151diff --git a/include/net/netlink.h b/include/net/netlink.h
69152index c344646..4778c71 100644
69153--- a/include/net/netlink.h
69154+++ b/include/net/netlink.h
69155@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
69156 {
69157 return (remaining >= (int) sizeof(struct nlmsghdr) &&
69158 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
69159- nlh->nlmsg_len <= remaining);
69160+ nlh->nlmsg_len <= (unsigned int)remaining);
69161 }
69162
69163 /**
69164@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
69165 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
69166 {
69167 if (mark)
69168- skb_trim(skb, (unsigned char *) mark - skb->data);
69169+ skb_trim(skb, (const unsigned char *) mark - skb->data);
69170 }
69171
69172 /**
69173diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
69174index 9a4b8b7..e49e077 100644
69175--- a/include/net/netns/ipv4.h
69176+++ b/include/net/netns/ipv4.h
69177@@ -54,7 +54,7 @@ struct netns_ipv4 {
69178 int current_rt_cache_rebuild_count;
69179
69180 struct timer_list rt_secret_timer;
69181- atomic_t rt_genid;
69182+ atomic_unchecked_t rt_genid;
69183
69184 #ifdef CONFIG_IP_MROUTE
69185 struct sock *mroute_sk;
69186diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
69187index 8a6d529..171f401 100644
69188--- a/include/net/sctp/sctp.h
69189+++ b/include/net/sctp/sctp.h
69190@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
69191
69192 #else /* SCTP_DEBUG */
69193
69194-#define SCTP_DEBUG_PRINTK(whatever...)
69195-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
69196+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
69197+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
69198 #define SCTP_ENABLE_DEBUG
69199 #define SCTP_DISABLE_DEBUG
69200 #define SCTP_ASSERT(expr, str, func)
69201diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
69202index d97f689..f3b90ab 100644
69203--- a/include/net/secure_seq.h
69204+++ b/include/net/secure_seq.h
69205@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
69206 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
69207 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
69208 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
69209- __be16 dport);
69210+ __be16 dport);
69211 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
69212 __be16 sport, __be16 dport);
69213 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69214- __be16 sport, __be16 dport);
69215+ __be16 sport, __be16 dport);
69216 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
69217- __be16 sport, __be16 dport);
69218+ __be16 sport, __be16 dport);
69219 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69220- __be16 sport, __be16 dport);
69221+ __be16 sport, __be16 dport);
69222
69223 #endif /* _NET_SECURE_SEQ */
69224diff --git a/include/net/sock.h b/include/net/sock.h
69225index 9f96394..76fc9c7 100644
69226--- a/include/net/sock.h
69227+++ b/include/net/sock.h
69228@@ -272,7 +272,7 @@ struct sock {
69229 rwlock_t sk_callback_lock;
69230 int sk_err,
69231 sk_err_soft;
69232- atomic_t sk_drops;
69233+ atomic_unchecked_t sk_drops;
69234 unsigned short sk_ack_backlog;
69235 unsigned short sk_max_ack_backlog;
69236 __u32 sk_priority;
69237@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
69238 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
69239 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
69240 #else
69241-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
69242+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
69243 int inc)
69244 {
69245 }
69246diff --git a/include/net/tcp.h b/include/net/tcp.h
69247index 6cfe18b..dd21acb 100644
69248--- a/include/net/tcp.h
69249+++ b/include/net/tcp.h
69250@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
69251 struct tcp_seq_afinfo {
69252 char *name;
69253 sa_family_t family;
69254- struct file_operations seq_fops;
69255- struct seq_operations seq_ops;
69256+ file_operations_no_const seq_fops;
69257+ seq_operations_no_const seq_ops;
69258 };
69259
69260 struct tcp_iter_state {
69261diff --git a/include/net/udp.h b/include/net/udp.h
69262index f98abd2..b4b042f 100644
69263--- a/include/net/udp.h
69264+++ b/include/net/udp.h
69265@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
69266 char *name;
69267 sa_family_t family;
69268 struct udp_table *udp_table;
69269- struct file_operations seq_fops;
69270- struct seq_operations seq_ops;
69271+ file_operations_no_const seq_fops;
69272+ seq_operations_no_const seq_ops;
69273 };
69274
69275 struct udp_iter_state {
69276diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
69277index cbb822e..e9c1cbe 100644
69278--- a/include/rdma/iw_cm.h
69279+++ b/include/rdma/iw_cm.h
69280@@ -129,7 +129,7 @@ struct iw_cm_verbs {
69281 int backlog);
69282
69283 int (*destroy_listen)(struct iw_cm_id *cm_id);
69284-};
69285+} __no_const;
69286
69287 /**
69288 * iw_create_cm_id - Create an IW CM identifier.
69289diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
69290index 09a124b..caa8ca8 100644
69291--- a/include/scsi/libfc.h
69292+++ b/include/scsi/libfc.h
69293@@ -675,6 +675,7 @@ struct libfc_function_template {
69294 */
69295 void (*disc_stop_final) (struct fc_lport *);
69296 };
69297+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
69298
69299 /* information used by the discovery layer */
69300 struct fc_disc {
69301@@ -707,7 +708,7 @@ struct fc_lport {
69302 struct fc_disc disc;
69303
69304 /* Operational Information */
69305- struct libfc_function_template tt;
69306+ libfc_function_template_no_const tt;
69307 u8 link_up;
69308 u8 qfull;
69309 enum fc_lport_state state;
69310diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
69311index de8e180..f15e0d7 100644
69312--- a/include/scsi/scsi_device.h
69313+++ b/include/scsi/scsi_device.h
69314@@ -156,9 +156,9 @@ struct scsi_device {
69315 unsigned int max_device_blocked; /* what device_blocked counts down from */
69316 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
69317
69318- atomic_t iorequest_cnt;
69319- atomic_t iodone_cnt;
69320- atomic_t ioerr_cnt;
69321+ atomic_unchecked_t iorequest_cnt;
69322+ atomic_unchecked_t iodone_cnt;
69323+ atomic_unchecked_t ioerr_cnt;
69324
69325 struct device sdev_gendev,
69326 sdev_dev;
69327diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
69328index fc50bd6..81ba9cb 100644
69329--- a/include/scsi/scsi_transport_fc.h
69330+++ b/include/scsi/scsi_transport_fc.h
69331@@ -708,7 +708,7 @@ struct fc_function_template {
69332 unsigned long show_host_system_hostname:1;
69333
69334 unsigned long disable_target_scan:1;
69335-};
69336+} __do_const;
69337
69338
69339 /**
69340diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
69341index 3dae3f7..8440d6f 100644
69342--- a/include/sound/ac97_codec.h
69343+++ b/include/sound/ac97_codec.h
69344@@ -419,15 +419,15 @@
69345 struct snd_ac97;
69346
69347 struct snd_ac97_build_ops {
69348- int (*build_3d) (struct snd_ac97 *ac97);
69349- int (*build_specific) (struct snd_ac97 *ac97);
69350- int (*build_spdif) (struct snd_ac97 *ac97);
69351- int (*build_post_spdif) (struct snd_ac97 *ac97);
69352+ int (* const build_3d) (struct snd_ac97 *ac97);
69353+ int (* const build_specific) (struct snd_ac97 *ac97);
69354+ int (* const build_spdif) (struct snd_ac97 *ac97);
69355+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
69356 #ifdef CONFIG_PM
69357- void (*suspend) (struct snd_ac97 *ac97);
69358- void (*resume) (struct snd_ac97 *ac97);
69359+ void (* const suspend) (struct snd_ac97 *ac97);
69360+ void (* const resume) (struct snd_ac97 *ac97);
69361 #endif
69362- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
69363+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
69364 };
69365
69366 struct snd_ac97_bus_ops {
69367@@ -477,7 +477,7 @@ struct snd_ac97_template {
69368
69369 struct snd_ac97 {
69370 /* -- lowlevel (hardware) driver specific -- */
69371- struct snd_ac97_build_ops * build_ops;
69372+ const struct snd_ac97_build_ops * build_ops;
69373 void *private_data;
69374 void (*private_free) (struct snd_ac97 *ac97);
69375 /* --- */
69376diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
69377index 891cf1a..a94ba2b 100644
69378--- a/include/sound/ak4xxx-adda.h
69379+++ b/include/sound/ak4xxx-adda.h
69380@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
69381 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
69382 unsigned char val);
69383 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69384-};
69385+} __no_const;
69386
69387 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
69388
69389diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
69390index 8c05e47..2b5df97 100644
69391--- a/include/sound/hwdep.h
69392+++ b/include/sound/hwdep.h
69393@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
69394 struct snd_hwdep_dsp_status *status);
69395 int (*dsp_load)(struct snd_hwdep *hw,
69396 struct snd_hwdep_dsp_image *image);
69397-};
69398+} __no_const;
69399
69400 struct snd_hwdep {
69401 struct snd_card *card;
69402diff --git a/include/sound/info.h b/include/sound/info.h
69403index 112e894..6fda5b5 100644
69404--- a/include/sound/info.h
69405+++ b/include/sound/info.h
69406@@ -44,7 +44,7 @@ struct snd_info_entry_text {
69407 struct snd_info_buffer *buffer);
69408 void (*write)(struct snd_info_entry *entry,
69409 struct snd_info_buffer *buffer);
69410-};
69411+} __no_const;
69412
69413 struct snd_info_entry_ops {
69414 int (*open)(struct snd_info_entry *entry,
69415diff --git a/include/sound/pcm.h b/include/sound/pcm.h
69416index de6d981..590a550 100644
69417--- a/include/sound/pcm.h
69418+++ b/include/sound/pcm.h
69419@@ -80,6 +80,7 @@ struct snd_pcm_ops {
69420 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
69421 int (*ack)(struct snd_pcm_substream *substream);
69422 };
69423+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
69424
69425 /*
69426 *
69427diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
69428index 736eac7..fe8a80f 100644
69429--- a/include/sound/sb16_csp.h
69430+++ b/include/sound/sb16_csp.h
69431@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
69432 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
69433 int (*csp_stop) (struct snd_sb_csp * p);
69434 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
69435-};
69436+} __no_const;
69437
69438 /*
69439 * CSP private data
69440diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
69441index 444cd6b..3327cc5 100644
69442--- a/include/sound/ymfpci.h
69443+++ b/include/sound/ymfpci.h
69444@@ -358,7 +358,7 @@ struct snd_ymfpci {
69445 spinlock_t reg_lock;
69446 spinlock_t voice_lock;
69447 wait_queue_head_t interrupt_sleep;
69448- atomic_t interrupt_sleep_count;
69449+ atomic_unchecked_t interrupt_sleep_count;
69450 struct snd_info_entry *proc_entry;
69451 const struct firmware *dsp_microcode;
69452 const struct firmware *controller_microcode;
69453diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
69454index b89f9db..f097b38 100644
69455--- a/include/trace/events/irq.h
69456+++ b/include/trace/events/irq.h
69457@@ -34,7 +34,7 @@
69458 */
69459 TRACE_EVENT(irq_handler_entry,
69460
69461- TP_PROTO(int irq, struct irqaction *action),
69462+ TP_PROTO(int irq, const struct irqaction *action),
69463
69464 TP_ARGS(irq, action),
69465
69466@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
69467 */
69468 TRACE_EVENT(irq_handler_exit,
69469
69470- TP_PROTO(int irq, struct irqaction *action, int ret),
69471+ TP_PROTO(int irq, const struct irqaction *action, int ret),
69472
69473 TP_ARGS(irq, action, ret),
69474
69475@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
69476 */
69477 TRACE_EVENT(softirq_entry,
69478
69479- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
69480+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
69481
69482 TP_ARGS(h, vec),
69483
69484@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
69485 */
69486 TRACE_EVENT(softirq_exit,
69487
69488- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
69489+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
69490
69491 TP_ARGS(h, vec),
69492
69493diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
69494index 0993a22..32ba2fe 100644
69495--- a/include/video/uvesafb.h
69496+++ b/include/video/uvesafb.h
69497@@ -177,6 +177,7 @@ struct uvesafb_par {
69498 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
69499 u8 pmi_setpal; /* PMI for palette changes */
69500 u16 *pmi_base; /* protected mode interface location */
69501+ u8 *pmi_code; /* protected mode code location */
69502 void *pmi_start;
69503 void *pmi_pal;
69504 u8 *vbe_state_orig; /*
69505diff --git a/init/Kconfig b/init/Kconfig
69506index d72691b..3996e54 100644
69507--- a/init/Kconfig
69508+++ b/init/Kconfig
69509@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
69510
69511 config COMPAT_BRK
69512 bool "Disable heap randomization"
69513- default y
69514+ default n
69515 help
69516 Randomizing heap placement makes heap exploits harder, but it
69517 also breaks ancient binaries (including anything libc5 based).
69518diff --git a/init/do_mounts.c b/init/do_mounts.c
69519index bb008d0..4fa3933 100644
69520--- a/init/do_mounts.c
69521+++ b/init/do_mounts.c
69522@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
69523
69524 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
69525 {
69526- int err = sys_mount(name, "/root", fs, flags, data);
69527+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
69528 if (err)
69529 return err;
69530
69531- sys_chdir("/root");
69532+ sys_chdir((__force const char __user *)"/root");
69533 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
69534 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
69535 current->fs->pwd.mnt->mnt_sb->s_type->name,
69536@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
69537 va_start(args, fmt);
69538 vsprintf(buf, fmt, args);
69539 va_end(args);
69540- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
69541+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
69542 if (fd >= 0) {
69543 sys_ioctl(fd, FDEJECT, 0);
69544 sys_close(fd);
69545 }
69546 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
69547- fd = sys_open("/dev/console", O_RDWR, 0);
69548+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
69549 if (fd >= 0) {
69550 sys_ioctl(fd, TCGETS, (long)&termios);
69551 termios.c_lflag &= ~ICANON;
69552 sys_ioctl(fd, TCSETSF, (long)&termios);
69553- sys_read(fd, &c, 1);
69554+ sys_read(fd, (char __user *)&c, 1);
69555 termios.c_lflag |= ICANON;
69556 sys_ioctl(fd, TCSETSF, (long)&termios);
69557 sys_close(fd);
69558@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
69559 mount_root();
69560 out:
69561 devtmpfs_mount("dev");
69562- sys_mount(".", "/", NULL, MS_MOVE, NULL);
69563- sys_chroot(".");
69564+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
69565+ sys_chroot((__force char __user *)".");
69566 }
69567diff --git a/init/do_mounts.h b/init/do_mounts.h
69568index f5b978a..69dbfe8 100644
69569--- a/init/do_mounts.h
69570+++ b/init/do_mounts.h
69571@@ -15,15 +15,15 @@ extern int root_mountflags;
69572
69573 static inline int create_dev(char *name, dev_t dev)
69574 {
69575- sys_unlink(name);
69576- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
69577+ sys_unlink((char __force_user *)name);
69578+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
69579 }
69580
69581 #if BITS_PER_LONG == 32
69582 static inline u32 bstat(char *name)
69583 {
69584 struct stat64 stat;
69585- if (sys_stat64(name, &stat) != 0)
69586+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
69587 return 0;
69588 if (!S_ISBLK(stat.st_mode))
69589 return 0;
69590@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
69591 static inline u32 bstat(char *name)
69592 {
69593 struct stat stat;
69594- if (sys_newstat(name, &stat) != 0)
69595+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
69596 return 0;
69597 if (!S_ISBLK(stat.st_mode))
69598 return 0;
69599diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
69600index 614241b..4da046b 100644
69601--- a/init/do_mounts_initrd.c
69602+++ b/init/do_mounts_initrd.c
69603@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
69604 sys_close(old_fd);sys_close(root_fd);
69605 sys_close(0);sys_close(1);sys_close(2);
69606 sys_setsid();
69607- (void) sys_open("/dev/console",O_RDWR,0);
69608+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
69609 (void) sys_dup(0);
69610 (void) sys_dup(0);
69611 return kernel_execve(shell, argv, envp_init);
69612@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
69613 create_dev("/dev/root.old", Root_RAM0);
69614 /* mount initrd on rootfs' /root */
69615 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
69616- sys_mkdir("/old", 0700);
69617- root_fd = sys_open("/", 0, 0);
69618- old_fd = sys_open("/old", 0, 0);
69619+ sys_mkdir((const char __force_user *)"/old", 0700);
69620+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
69621+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
69622 /* move initrd over / and chdir/chroot in initrd root */
69623- sys_chdir("/root");
69624- sys_mount(".", "/", NULL, MS_MOVE, NULL);
69625- sys_chroot(".");
69626+ sys_chdir((const char __force_user *)"/root");
69627+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
69628+ sys_chroot((const char __force_user *)".");
69629
69630 /*
69631 * In case that a resume from disk is carried out by linuxrc or one of
69632@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
69633
69634 /* move initrd to rootfs' /old */
69635 sys_fchdir(old_fd);
69636- sys_mount("/", ".", NULL, MS_MOVE, NULL);
69637+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
69638 /* switch root and cwd back to / of rootfs */
69639 sys_fchdir(root_fd);
69640- sys_chroot(".");
69641+ sys_chroot((const char __force_user *)".");
69642 sys_close(old_fd);
69643 sys_close(root_fd);
69644
69645 if (new_decode_dev(real_root_dev) == Root_RAM0) {
69646- sys_chdir("/old");
69647+ sys_chdir((const char __force_user *)"/old");
69648 return;
69649 }
69650
69651@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
69652 mount_root();
69653
69654 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
69655- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
69656+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
69657 if (!error)
69658 printk("okay\n");
69659 else {
69660- int fd = sys_open("/dev/root.old", O_RDWR, 0);
69661+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
69662 if (error == -ENOENT)
69663 printk("/initrd does not exist. Ignored.\n");
69664 else
69665 printk("failed\n");
69666 printk(KERN_NOTICE "Unmounting old root\n");
69667- sys_umount("/old", MNT_DETACH);
69668+ sys_umount((char __force_user *)"/old", MNT_DETACH);
69669 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
69670 if (fd < 0) {
69671 error = fd;
69672@@ -119,11 +119,11 @@ int __init initrd_load(void)
69673 * mounted in the normal path.
69674 */
69675 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
69676- sys_unlink("/initrd.image");
69677+ sys_unlink((const char __force_user *)"/initrd.image");
69678 handle_initrd();
69679 return 1;
69680 }
69681 }
69682- sys_unlink("/initrd.image");
69683+ sys_unlink((const char __force_user *)"/initrd.image");
69684 return 0;
69685 }
69686diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
69687index 69aebbf..c0bf6a7 100644
69688--- a/init/do_mounts_md.c
69689+++ b/init/do_mounts_md.c
69690@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
69691 partitioned ? "_d" : "", minor,
69692 md_setup_args[ent].device_names);
69693
69694- fd = sys_open(name, 0, 0);
69695+ fd = sys_open((char __force_user *)name, 0, 0);
69696 if (fd < 0) {
69697 printk(KERN_ERR "md: open failed - cannot start "
69698 "array %s\n", name);
69699@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
69700 * array without it
69701 */
69702 sys_close(fd);
69703- fd = sys_open(name, 0, 0);
69704+ fd = sys_open((char __force_user *)name, 0, 0);
69705 sys_ioctl(fd, BLKRRPART, 0);
69706 }
69707 sys_close(fd);
69708@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
69709
69710 wait_for_device_probe();
69711
69712- fd = sys_open("/dev/md0", 0, 0);
69713+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
69714 if (fd >= 0) {
69715 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
69716 sys_close(fd);
69717diff --git a/init/initramfs.c b/init/initramfs.c
69718index 1fd59b8..a01b079 100644
69719--- a/init/initramfs.c
69720+++ b/init/initramfs.c
69721@@ -74,7 +74,7 @@ static void __init free_hash(void)
69722 }
69723 }
69724
69725-static long __init do_utime(char __user *filename, time_t mtime)
69726+static long __init do_utime(__force char __user *filename, time_t mtime)
69727 {
69728 struct timespec t[2];
69729
69730@@ -109,7 +109,7 @@ static void __init dir_utime(void)
69731 struct dir_entry *de, *tmp;
69732 list_for_each_entry_safe(de, tmp, &dir_list, list) {
69733 list_del(&de->list);
69734- do_utime(de->name, de->mtime);
69735+ do_utime((char __force_user *)de->name, de->mtime);
69736 kfree(de->name);
69737 kfree(de);
69738 }
69739@@ -271,7 +271,7 @@ static int __init maybe_link(void)
69740 if (nlink >= 2) {
69741 char *old = find_link(major, minor, ino, mode, collected);
69742 if (old)
69743- return (sys_link(old, collected) < 0) ? -1 : 1;
69744+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
69745 }
69746 return 0;
69747 }
69748@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
69749 {
69750 struct stat st;
69751
69752- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
69753+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
69754 if (S_ISDIR(st.st_mode))
69755- sys_rmdir(path);
69756+ sys_rmdir((char __force_user *)path);
69757 else
69758- sys_unlink(path);
69759+ sys_unlink((char __force_user *)path);
69760 }
69761 }
69762
69763@@ -305,7 +305,7 @@ static int __init do_name(void)
69764 int openflags = O_WRONLY|O_CREAT;
69765 if (ml != 1)
69766 openflags |= O_TRUNC;
69767- wfd = sys_open(collected, openflags, mode);
69768+ wfd = sys_open((char __force_user *)collected, openflags, mode);
69769
69770 if (wfd >= 0) {
69771 sys_fchown(wfd, uid, gid);
69772@@ -317,17 +317,17 @@ static int __init do_name(void)
69773 }
69774 }
69775 } else if (S_ISDIR(mode)) {
69776- sys_mkdir(collected, mode);
69777- sys_chown(collected, uid, gid);
69778- sys_chmod(collected, mode);
69779+ sys_mkdir((char __force_user *)collected, mode);
69780+ sys_chown((char __force_user *)collected, uid, gid);
69781+ sys_chmod((char __force_user *)collected, mode);
69782 dir_add(collected, mtime);
69783 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
69784 S_ISFIFO(mode) || S_ISSOCK(mode)) {
69785 if (maybe_link() == 0) {
69786- sys_mknod(collected, mode, rdev);
69787- sys_chown(collected, uid, gid);
69788- sys_chmod(collected, mode);
69789- do_utime(collected, mtime);
69790+ sys_mknod((char __force_user *)collected, mode, rdev);
69791+ sys_chown((char __force_user *)collected, uid, gid);
69792+ sys_chmod((char __force_user *)collected, mode);
69793+ do_utime((char __force_user *)collected, mtime);
69794 }
69795 }
69796 return 0;
69797@@ -336,15 +336,15 @@ static int __init do_name(void)
69798 static int __init do_copy(void)
69799 {
69800 if (count >= body_len) {
69801- sys_write(wfd, victim, body_len);
69802+ sys_write(wfd, (char __force_user *)victim, body_len);
69803 sys_close(wfd);
69804- do_utime(vcollected, mtime);
69805+ do_utime((char __force_user *)vcollected, mtime);
69806 kfree(vcollected);
69807 eat(body_len);
69808 state = SkipIt;
69809 return 0;
69810 } else {
69811- sys_write(wfd, victim, count);
69812+ sys_write(wfd, (char __force_user *)victim, count);
69813 body_len -= count;
69814 eat(count);
69815 return 1;
69816@@ -355,9 +355,9 @@ static int __init do_symlink(void)
69817 {
69818 collected[N_ALIGN(name_len) + body_len] = '\0';
69819 clean_path(collected, 0);
69820- sys_symlink(collected + N_ALIGN(name_len), collected);
69821- sys_lchown(collected, uid, gid);
69822- do_utime(collected, mtime);
69823+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
69824+ sys_lchown((char __force_user *)collected, uid, gid);
69825+ do_utime((char __force_user *)collected, mtime);
69826 state = SkipIt;
69827 next_state = Reset;
69828 return 0;
69829diff --git a/init/main.c b/init/main.c
69830index 1eb4bd5..da8c6f5 100644
69831--- a/init/main.c
69832+++ b/init/main.c
69833@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
69834 #ifdef CONFIG_TC
69835 extern void tc_init(void);
69836 #endif
69837+extern void grsecurity_init(void);
69838
69839 enum system_states system_state __read_mostly;
69840 EXPORT_SYMBOL(system_state);
69841@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
69842
69843 __setup("reset_devices", set_reset_devices);
69844
69845+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
69846+extern char pax_enter_kernel_user[];
69847+extern char pax_exit_kernel_user[];
69848+extern pgdval_t clone_pgd_mask;
69849+#endif
69850+
69851+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
69852+static int __init setup_pax_nouderef(char *str)
69853+{
69854+#ifdef CONFIG_X86_32
69855+ unsigned int cpu;
69856+ struct desc_struct *gdt;
69857+
69858+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
69859+ gdt = get_cpu_gdt_table(cpu);
69860+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
69861+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
69862+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
69863+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
69864+ }
69865+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
69866+#else
69867+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
69868+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
69869+ clone_pgd_mask = ~(pgdval_t)0UL;
69870+#endif
69871+
69872+ return 0;
69873+}
69874+early_param("pax_nouderef", setup_pax_nouderef);
69875+#endif
69876+
69877+#ifdef CONFIG_PAX_SOFTMODE
69878+int pax_softmode;
69879+
69880+static int __init setup_pax_softmode(char *str)
69881+{
69882+ get_option(&str, &pax_softmode);
69883+ return 1;
69884+}
69885+__setup("pax_softmode=", setup_pax_softmode);
69886+#endif
69887+
69888 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
69889 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
69890 static const char *panic_later, *panic_param;
69891@@ -705,52 +749,53 @@ int initcall_debug;
69892 core_param(initcall_debug, initcall_debug, bool, 0644);
69893
69894 static char msgbuf[64];
69895-static struct boot_trace_call call;
69896-static struct boot_trace_ret ret;
69897+static struct boot_trace_call trace_call;
69898+static struct boot_trace_ret trace_ret;
69899
69900 int do_one_initcall(initcall_t fn)
69901 {
69902 int count = preempt_count();
69903 ktime_t calltime, delta, rettime;
69904+ const char *msg1 = "", *msg2 = "";
69905
69906 if (initcall_debug) {
69907- call.caller = task_pid_nr(current);
69908- printk("calling %pF @ %i\n", fn, call.caller);
69909+ trace_call.caller = task_pid_nr(current);
69910+ printk("calling %pF @ %i\n", fn, trace_call.caller);
69911 calltime = ktime_get();
69912- trace_boot_call(&call, fn);
69913+ trace_boot_call(&trace_call, fn);
69914 enable_boot_trace();
69915 }
69916
69917- ret.result = fn();
69918+ trace_ret.result = fn();
69919
69920 if (initcall_debug) {
69921 disable_boot_trace();
69922 rettime = ktime_get();
69923 delta = ktime_sub(rettime, calltime);
69924- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
69925- trace_boot_ret(&ret, fn);
69926+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
69927+ trace_boot_ret(&trace_ret, fn);
69928 printk("initcall %pF returned %d after %Ld usecs\n", fn,
69929- ret.result, ret.duration);
69930+ trace_ret.result, trace_ret.duration);
69931 }
69932
69933 msgbuf[0] = 0;
69934
69935- if (ret.result && ret.result != -ENODEV && initcall_debug)
69936- sprintf(msgbuf, "error code %d ", ret.result);
69937+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
69938+ sprintf(msgbuf, "error code %d ", trace_ret.result);
69939
69940 if (preempt_count() != count) {
69941- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
69942+ msg1 = " preemption imbalance";
69943 preempt_count() = count;
69944 }
69945 if (irqs_disabled()) {
69946- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
69947+ msg2 = " disabled interrupts";
69948 local_irq_enable();
69949 }
69950- if (msgbuf[0]) {
69951- printk("initcall %pF returned with %s\n", fn, msgbuf);
69952+ if (msgbuf[0] || *msg1 || *msg2) {
69953+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
69954 }
69955
69956- return ret.result;
69957+ return trace_ret.result;
69958 }
69959
69960
69961@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
69962 if (!ramdisk_execute_command)
69963 ramdisk_execute_command = "/init";
69964
69965- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
69966+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
69967 ramdisk_execute_command = NULL;
69968 prepare_namespace();
69969 }
69970
69971+ grsecurity_init();
69972+
69973 /*
69974 * Ok, we have completed the initial bootup, and
69975 * we're essentially up and running. Get rid of the
69976diff --git a/init/noinitramfs.c b/init/noinitramfs.c
69977index f4c1a3a..96c19bd 100644
69978--- a/init/noinitramfs.c
69979+++ b/init/noinitramfs.c
69980@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
69981 {
69982 int err;
69983
69984- err = sys_mkdir("/dev", 0755);
69985+ err = sys_mkdir((const char __user *)"/dev", 0755);
69986 if (err < 0)
69987 goto out;
69988
69989@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
69990 if (err < 0)
69991 goto out;
69992
69993- err = sys_mkdir("/root", 0700);
69994+ err = sys_mkdir((const char __user *)"/root", 0700);
69995 if (err < 0)
69996 goto out;
69997
69998diff --git a/ipc/mqueue.c b/ipc/mqueue.c
69999index d01bc14..8df81db 100644
70000--- a/ipc/mqueue.c
70001+++ b/ipc/mqueue.c
70002@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
70003 mq_bytes = (mq_msg_tblsz +
70004 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
70005
70006+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
70007 spin_lock(&mq_lock);
70008 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
70009 u->mq_bytes + mq_bytes >
70010diff --git a/ipc/msg.c b/ipc/msg.c
70011index 779f762..4af9e36 100644
70012--- a/ipc/msg.c
70013+++ b/ipc/msg.c
70014@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
70015 return security_msg_queue_associate(msq, msgflg);
70016 }
70017
70018+static struct ipc_ops msg_ops = {
70019+ .getnew = newque,
70020+ .associate = msg_security,
70021+ .more_checks = NULL
70022+};
70023+
70024 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
70025 {
70026 struct ipc_namespace *ns;
70027- struct ipc_ops msg_ops;
70028 struct ipc_params msg_params;
70029
70030 ns = current->nsproxy->ipc_ns;
70031
70032- msg_ops.getnew = newque;
70033- msg_ops.associate = msg_security;
70034- msg_ops.more_checks = NULL;
70035-
70036 msg_params.key = key;
70037 msg_params.flg = msgflg;
70038
70039diff --git a/ipc/sem.c b/ipc/sem.c
70040index b781007..f738b04 100644
70041--- a/ipc/sem.c
70042+++ b/ipc/sem.c
70043@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
70044 return 0;
70045 }
70046
70047+static struct ipc_ops sem_ops = {
70048+ .getnew = newary,
70049+ .associate = sem_security,
70050+ .more_checks = sem_more_checks
70051+};
70052+
70053 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70054 {
70055 struct ipc_namespace *ns;
70056- struct ipc_ops sem_ops;
70057 struct ipc_params sem_params;
70058
70059 ns = current->nsproxy->ipc_ns;
70060@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70061 if (nsems < 0 || nsems > ns->sc_semmsl)
70062 return -EINVAL;
70063
70064- sem_ops.getnew = newary;
70065- sem_ops.associate = sem_security;
70066- sem_ops.more_checks = sem_more_checks;
70067-
70068 sem_params.key = key;
70069 sem_params.flg = semflg;
70070 sem_params.u.nsems = nsems;
70071@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
70072 ushort* sem_io = fast_sem_io;
70073 int nsems;
70074
70075+ pax_track_stack();
70076+
70077 sma = sem_lock_check(ns, semid);
70078 if (IS_ERR(sma))
70079 return PTR_ERR(sma);
70080@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
70081 unsigned long jiffies_left = 0;
70082 struct ipc_namespace *ns;
70083
70084+ pax_track_stack();
70085+
70086 ns = current->nsproxy->ipc_ns;
70087
70088 if (nsops < 1 || semid < 0)
70089diff --git a/ipc/shm.c b/ipc/shm.c
70090index d30732c..7379456 100644
70091--- a/ipc/shm.c
70092+++ b/ipc/shm.c
70093@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
70094 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
70095 #endif
70096
70097+#ifdef CONFIG_GRKERNSEC
70098+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70099+ const time_t shm_createtime, const uid_t cuid,
70100+ const int shmid);
70101+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70102+ const time_t shm_createtime);
70103+#endif
70104+
70105 void shm_init_ns(struct ipc_namespace *ns)
70106 {
70107 ns->shm_ctlmax = SHMMAX;
70108@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
70109 shp->shm_lprid = 0;
70110 shp->shm_atim = shp->shm_dtim = 0;
70111 shp->shm_ctim = get_seconds();
70112+#ifdef CONFIG_GRKERNSEC
70113+ {
70114+ struct timespec timeval;
70115+ do_posix_clock_monotonic_gettime(&timeval);
70116+
70117+ shp->shm_createtime = timeval.tv_sec;
70118+ }
70119+#endif
70120 shp->shm_segsz = size;
70121 shp->shm_nattch = 0;
70122 shp->shm_file = file;
70123@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
70124 return 0;
70125 }
70126
70127+static struct ipc_ops shm_ops = {
70128+ .getnew = newseg,
70129+ .associate = shm_security,
70130+ .more_checks = shm_more_checks
70131+};
70132+
70133 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
70134 {
70135 struct ipc_namespace *ns;
70136- struct ipc_ops shm_ops;
70137 struct ipc_params shm_params;
70138
70139 ns = current->nsproxy->ipc_ns;
70140
70141- shm_ops.getnew = newseg;
70142- shm_ops.associate = shm_security;
70143- shm_ops.more_checks = shm_more_checks;
70144-
70145 shm_params.key = key;
70146 shm_params.flg = shmflg;
70147 shm_params.u.size = size;
70148@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
70149 if (err)
70150 goto out_unlock;
70151
70152+#ifdef CONFIG_GRKERNSEC
70153+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
70154+ shp->shm_perm.cuid, shmid) ||
70155+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
70156+ err = -EACCES;
70157+ goto out_unlock;
70158+ }
70159+#endif
70160+
70161 path.dentry = dget(shp->shm_file->f_path.dentry);
70162 path.mnt = shp->shm_file->f_path.mnt;
70163 shp->shm_nattch++;
70164+#ifdef CONFIG_GRKERNSEC
70165+ shp->shm_lapid = current->pid;
70166+#endif
70167 size = i_size_read(path.dentry->d_inode);
70168 shm_unlock(shp);
70169
70170diff --git a/kernel/acct.c b/kernel/acct.c
70171index a6605ca..ca91111 100644
70172--- a/kernel/acct.c
70173+++ b/kernel/acct.c
70174@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
70175 */
70176 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
70177 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
70178- file->f_op->write(file, (char *)&ac,
70179+ file->f_op->write(file, (char __force_user *)&ac,
70180 sizeof(acct_t), &file->f_pos);
70181 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
70182 set_fs(fs);
70183diff --git a/kernel/audit.c b/kernel/audit.c
70184index 5feed23..513b02c 100644
70185--- a/kernel/audit.c
70186+++ b/kernel/audit.c
70187@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
70188 3) suppressed due to audit_rate_limit
70189 4) suppressed due to audit_backlog_limit
70190 */
70191-static atomic_t audit_lost = ATOMIC_INIT(0);
70192+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
70193
70194 /* The netlink socket. */
70195 static struct sock *audit_sock;
70196@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
70197 unsigned long now;
70198 int print;
70199
70200- atomic_inc(&audit_lost);
70201+ atomic_inc_unchecked(&audit_lost);
70202
70203 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
70204
70205@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
70206 printk(KERN_WARNING
70207 "audit: audit_lost=%d audit_rate_limit=%d "
70208 "audit_backlog_limit=%d\n",
70209- atomic_read(&audit_lost),
70210+ atomic_read_unchecked(&audit_lost),
70211 audit_rate_limit,
70212 audit_backlog_limit);
70213 audit_panic(message);
70214@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70215 status_set.pid = audit_pid;
70216 status_set.rate_limit = audit_rate_limit;
70217 status_set.backlog_limit = audit_backlog_limit;
70218- status_set.lost = atomic_read(&audit_lost);
70219+ status_set.lost = atomic_read_unchecked(&audit_lost);
70220 status_set.backlog = skb_queue_len(&audit_skb_queue);
70221 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
70222 &status_set, sizeof(status_set));
70223@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70224 spin_unlock_irq(&tsk->sighand->siglock);
70225 }
70226 read_unlock(&tasklist_lock);
70227- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
70228- &s, sizeof(s));
70229+
70230+ if (!err)
70231+ audit_send_reply(NETLINK_CB(skb).pid, seq,
70232+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
70233 break;
70234 }
70235 case AUDIT_TTY_SET: {
70236diff --git a/kernel/auditsc.c b/kernel/auditsc.c
70237index 267e484..f8e295a 100644
70238--- a/kernel/auditsc.c
70239+++ b/kernel/auditsc.c
70240@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
70241 }
70242
70243 /* global counter which is incremented every time something logs in */
70244-static atomic_t session_id = ATOMIC_INIT(0);
70245+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
70246
70247 /**
70248 * audit_set_loginuid - set a task's audit_context loginuid
70249@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
70250 */
70251 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
70252 {
70253- unsigned int sessionid = atomic_inc_return(&session_id);
70254+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
70255 struct audit_context *context = task->audit_context;
70256
70257 if (context && context->in_syscall) {
70258diff --git a/kernel/capability.c b/kernel/capability.c
70259index 8a944f5..db5001e 100644
70260--- a/kernel/capability.c
70261+++ b/kernel/capability.c
70262@@ -305,10 +305,26 @@ int capable(int cap)
70263 BUG();
70264 }
70265
70266- if (security_capable(cap) == 0) {
70267+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
70268 current->flags |= PF_SUPERPRIV;
70269 return 1;
70270 }
70271 return 0;
70272 }
70273+
70274+int capable_nolog(int cap)
70275+{
70276+ if (unlikely(!cap_valid(cap))) {
70277+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
70278+ BUG();
70279+ }
70280+
70281+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
70282+ current->flags |= PF_SUPERPRIV;
70283+ return 1;
70284+ }
70285+ return 0;
70286+}
70287+
70288 EXPORT_SYMBOL(capable);
70289+EXPORT_SYMBOL(capable_nolog);
70290diff --git a/kernel/cgroup.c b/kernel/cgroup.c
70291index 1fbcc74..7000012 100644
70292--- a/kernel/cgroup.c
70293+++ b/kernel/cgroup.c
70294@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
70295 struct hlist_head *hhead;
70296 struct cg_cgroup_link *link;
70297
70298+ pax_track_stack();
70299+
70300 /* First see if we already have a cgroup group that matches
70301 * the desired set */
70302 read_lock(&css_set_lock);
70303diff --git a/kernel/compat.c b/kernel/compat.c
70304index 8bc5578..186e44a 100644
70305--- a/kernel/compat.c
70306+++ b/kernel/compat.c
70307@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
70308 mm_segment_t oldfs;
70309 long ret;
70310
70311- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
70312+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
70313 oldfs = get_fs();
70314 set_fs(KERNEL_DS);
70315 ret = hrtimer_nanosleep_restart(restart);
70316@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
70317 oldfs = get_fs();
70318 set_fs(KERNEL_DS);
70319 ret = hrtimer_nanosleep(&tu,
70320- rmtp ? (struct timespec __user *)&rmt : NULL,
70321+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
70322 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
70323 set_fs(oldfs);
70324
70325@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
70326 mm_segment_t old_fs = get_fs();
70327
70328 set_fs(KERNEL_DS);
70329- ret = sys_sigpending((old_sigset_t __user *) &s);
70330+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
70331 set_fs(old_fs);
70332 if (ret == 0)
70333 ret = put_user(s, set);
70334@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
70335 old_fs = get_fs();
70336 set_fs(KERNEL_DS);
70337 ret = sys_sigprocmask(how,
70338- set ? (old_sigset_t __user *) &s : NULL,
70339- oset ? (old_sigset_t __user *) &s : NULL);
70340+ set ? (old_sigset_t __force_user *) &s : NULL,
70341+ oset ? (old_sigset_t __force_user *) &s : NULL);
70342 set_fs(old_fs);
70343 if (ret == 0)
70344 if (oset)
70345@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
70346 mm_segment_t old_fs = get_fs();
70347
70348 set_fs(KERNEL_DS);
70349- ret = sys_old_getrlimit(resource, &r);
70350+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
70351 set_fs(old_fs);
70352
70353 if (!ret) {
70354@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
70355 mm_segment_t old_fs = get_fs();
70356
70357 set_fs(KERNEL_DS);
70358- ret = sys_getrusage(who, (struct rusage __user *) &r);
70359+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
70360 set_fs(old_fs);
70361
70362 if (ret)
70363@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
70364 set_fs (KERNEL_DS);
70365 ret = sys_wait4(pid,
70366 (stat_addr ?
70367- (unsigned int __user *) &status : NULL),
70368- options, (struct rusage __user *) &r);
70369+ (unsigned int __force_user *) &status : NULL),
70370+ options, (struct rusage __force_user *) &r);
70371 set_fs (old_fs);
70372
70373 if (ret > 0) {
70374@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
70375 memset(&info, 0, sizeof(info));
70376
70377 set_fs(KERNEL_DS);
70378- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
70379- uru ? (struct rusage __user *)&ru : NULL);
70380+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
70381+ uru ? (struct rusage __force_user *)&ru : NULL);
70382 set_fs(old_fs);
70383
70384 if ((ret < 0) || (info.si_signo == 0))
70385@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
70386 oldfs = get_fs();
70387 set_fs(KERNEL_DS);
70388 err = sys_timer_settime(timer_id, flags,
70389- (struct itimerspec __user *) &newts,
70390- (struct itimerspec __user *) &oldts);
70391+ (struct itimerspec __force_user *) &newts,
70392+ (struct itimerspec __force_user *) &oldts);
70393 set_fs(oldfs);
70394 if (!err && old && put_compat_itimerspec(old, &oldts))
70395 return -EFAULT;
70396@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
70397 oldfs = get_fs();
70398 set_fs(KERNEL_DS);
70399 err = sys_timer_gettime(timer_id,
70400- (struct itimerspec __user *) &ts);
70401+ (struct itimerspec __force_user *) &ts);
70402 set_fs(oldfs);
70403 if (!err && put_compat_itimerspec(setting, &ts))
70404 return -EFAULT;
70405@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
70406 oldfs = get_fs();
70407 set_fs(KERNEL_DS);
70408 err = sys_clock_settime(which_clock,
70409- (struct timespec __user *) &ts);
70410+ (struct timespec __force_user *) &ts);
70411 set_fs(oldfs);
70412 return err;
70413 }
70414@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
70415 oldfs = get_fs();
70416 set_fs(KERNEL_DS);
70417 err = sys_clock_gettime(which_clock,
70418- (struct timespec __user *) &ts);
70419+ (struct timespec __force_user *) &ts);
70420 set_fs(oldfs);
70421 if (!err && put_compat_timespec(&ts, tp))
70422 return -EFAULT;
70423@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
70424 oldfs = get_fs();
70425 set_fs(KERNEL_DS);
70426 err = sys_clock_getres(which_clock,
70427- (struct timespec __user *) &ts);
70428+ (struct timespec __force_user *) &ts);
70429 set_fs(oldfs);
70430 if (!err && tp && put_compat_timespec(&ts, tp))
70431 return -EFAULT;
70432@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
70433 long err;
70434 mm_segment_t oldfs;
70435 struct timespec tu;
70436- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
70437+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
70438
70439- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
70440+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
70441 oldfs = get_fs();
70442 set_fs(KERNEL_DS);
70443 err = clock_nanosleep_restart(restart);
70444@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
70445 oldfs = get_fs();
70446 set_fs(KERNEL_DS);
70447 err = sys_clock_nanosleep(which_clock, flags,
70448- (struct timespec __user *) &in,
70449- (struct timespec __user *) &out);
70450+ (struct timespec __force_user *) &in,
70451+ (struct timespec __force_user *) &out);
70452 set_fs(oldfs);
70453
70454 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
70455diff --git a/kernel/configs.c b/kernel/configs.c
70456index abaee68..047facd 100644
70457--- a/kernel/configs.c
70458+++ b/kernel/configs.c
70459@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
70460 struct proc_dir_entry *entry;
70461
70462 /* create the current config file */
70463+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
70464+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
70465+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
70466+ &ikconfig_file_ops);
70467+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70468+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
70469+ &ikconfig_file_ops);
70470+#endif
70471+#else
70472 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
70473 &ikconfig_file_ops);
70474+#endif
70475+
70476 if (!entry)
70477 return -ENOMEM;
70478
70479diff --git a/kernel/cpu.c b/kernel/cpu.c
70480index 7e8b6ac..8921388 100644
70481--- a/kernel/cpu.c
70482+++ b/kernel/cpu.c
70483@@ -19,7 +19,7 @@
70484 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
70485 static DEFINE_MUTEX(cpu_add_remove_lock);
70486
70487-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
70488+static RAW_NOTIFIER_HEAD(cpu_chain);
70489
70490 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
70491 * Should always be manipulated under cpu_add_remove_lock
70492diff --git a/kernel/cred.c b/kernel/cred.c
70493index 0b5b5fc..419b86a 100644
70494--- a/kernel/cred.c
70495+++ b/kernel/cred.c
70496@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
70497 */
70498 void __put_cred(struct cred *cred)
70499 {
70500+ pax_track_stack();
70501+
70502 kdebug("__put_cred(%p{%d,%d})", cred,
70503 atomic_read(&cred->usage),
70504 read_cred_subscribers(cred));
70505@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
70506 {
70507 struct cred *cred;
70508
70509+ pax_track_stack();
70510+
70511 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
70512 atomic_read(&tsk->cred->usage),
70513 read_cred_subscribers(tsk->cred));
70514@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct task_struct *task)
70515 {
70516 const struct cred *cred;
70517
70518+ pax_track_stack();
70519+
70520 rcu_read_lock();
70521
70522 do {
70523@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
70524 {
70525 struct cred *new;
70526
70527+ pax_track_stack();
70528+
70529 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
70530 if (!new)
70531 return NULL;
70532@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
70533 const struct cred *old;
70534 struct cred *new;
70535
70536+ pax_track_stack();
70537+
70538 validate_process_creds();
70539
70540 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
70541@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
70542 struct thread_group_cred *tgcred = NULL;
70543 struct cred *new;
70544
70545+ pax_track_stack();
70546+
70547 #ifdef CONFIG_KEYS
70548 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
70549 if (!tgcred)
70550@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
70551 struct cred *new;
70552 int ret;
70553
70554+ pax_track_stack();
70555+
70556 mutex_init(&p->cred_guard_mutex);
70557
70558 if (
70559@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
70560 struct task_struct *task = current;
70561 const struct cred *old = task->real_cred;
70562
70563+ pax_track_stack();
70564+
70565 kdebug("commit_creds(%p{%d,%d})", new,
70566 atomic_read(&new->usage),
70567 read_cred_subscribers(new));
70568@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
70569
70570 get_cred(new); /* we will require a ref for the subj creds too */
70571
70572+ gr_set_role_label(task, new->uid, new->gid);
70573+
70574 /* dumpability changes */
70575 if (old->euid != new->euid ||
70576 old->egid != new->egid ||
70577@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
70578 key_fsgid_changed(task);
70579
70580 /* do it
70581- * - What if a process setreuid()'s and this brings the
70582- * new uid over his NPROC rlimit? We can check this now
70583- * cheaply with the new uid cache, so if it matters
70584- * we should be checking for it. -DaveM
70585+ * RLIMIT_NPROC limits on user->processes have already been checked
70586+ * in set_user().
70587 */
70588 alter_cred_subscribers(new, 2);
70589 if (new->user != old->user)
70590@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
70591 */
70592 void abort_creds(struct cred *new)
70593 {
70594+ pax_track_stack();
70595+
70596 kdebug("abort_creds(%p{%d,%d})", new,
70597 atomic_read(&new->usage),
70598 read_cred_subscribers(new));
70599@@ -629,6 +647,8 @@ const struct cred *override_creds(const struct cred *new)
70600 {
70601 const struct cred *old = current->cred;
70602
70603+ pax_track_stack();
70604+
70605 kdebug("override_creds(%p{%d,%d})", new,
70606 atomic_read(&new->usage),
70607 read_cred_subscribers(new));
70608@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old)
70609 {
70610 const struct cred *override = current->cred;
70611
70612+ pax_track_stack();
70613+
70614 kdebug("revert_creds(%p{%d,%d})", old,
70615 atomic_read(&old->usage),
70616 read_cred_subscribers(old));
70617@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
70618 const struct cred *old;
70619 struct cred *new;
70620
70621+ pax_track_stack();
70622+
70623 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
70624 if (!new)
70625 return NULL;
70626@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
70627 */
70628 int set_security_override(struct cred *new, u32 secid)
70629 {
70630+ pax_track_stack();
70631+
70632 return security_kernel_act_as(new, secid);
70633 }
70634 EXPORT_SYMBOL(set_security_override);
70635@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
70636 u32 secid;
70637 int ret;
70638
70639+ pax_track_stack();
70640+
70641 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
70642 if (ret < 0)
70643 return ret;
70644diff --git a/kernel/exit.c b/kernel/exit.c
70645index 0f8fae3..11757f1 100644
70646--- a/kernel/exit.c
70647+++ b/kernel/exit.c
70648@@ -55,6 +55,10 @@
70649 #include <asm/pgtable.h>
70650 #include <asm/mmu_context.h>
70651
70652+#ifdef CONFIG_GRKERNSEC
70653+extern rwlock_t grsec_exec_file_lock;
70654+#endif
70655+
70656 static void exit_mm(struct task_struct * tsk);
70657
70658 static void __unhash_process(struct task_struct *p)
70659@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
70660 struct task_struct *leader;
70661 int zap_leader;
70662 repeat:
70663+#ifdef CONFIG_NET
70664+ gr_del_task_from_ip_table(p);
70665+#endif
70666+
70667 tracehook_prepare_release_task(p);
70668 /* don't need to get the RCU readlock here - the process is dead and
70669 * can't be modifying its own credentials */
70670@@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
70671 {
70672 write_lock_irq(&tasklist_lock);
70673
70674+#ifdef CONFIG_GRKERNSEC
70675+ write_lock(&grsec_exec_file_lock);
70676+ if (current->exec_file) {
70677+ fput(current->exec_file);
70678+ current->exec_file = NULL;
70679+ }
70680+ write_unlock(&grsec_exec_file_lock);
70681+#endif
70682+
70683 ptrace_unlink(current);
70684 /* Reparent to init */
70685 current->real_parent = current->parent = kthreadd_task;
70686 list_move_tail(&current->sibling, &current->real_parent->children);
70687
70688+ gr_set_kernel_label(current);
70689+
70690 /* Set the exit signal to SIGCHLD so we signal init on exit */
70691 current->exit_signal = SIGCHLD;
70692
70693@@ -397,7 +416,7 @@ int allow_signal(int sig)
70694 * know it'll be handled, so that they don't get converted to
70695 * SIGKILL or just silently dropped.
70696 */
70697- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
70698+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
70699 recalc_sigpending();
70700 spin_unlock_irq(&current->sighand->siglock);
70701 return 0;
70702@@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
70703 vsnprintf(current->comm, sizeof(current->comm), name, args);
70704 va_end(args);
70705
70706+#ifdef CONFIG_GRKERNSEC
70707+ write_lock(&grsec_exec_file_lock);
70708+ if (current->exec_file) {
70709+ fput(current->exec_file);
70710+ current->exec_file = NULL;
70711+ }
70712+ write_unlock(&grsec_exec_file_lock);
70713+#endif
70714+
70715+ gr_set_kernel_label(current);
70716+
70717 /*
70718 * If we were started as result of loading a module, close all of the
70719 * user space pages. We don't need them, and if we didn't close them
70720@@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
70721 struct task_struct *tsk = current;
70722 int group_dead;
70723
70724- profile_task_exit(tsk);
70725-
70726- WARN_ON(atomic_read(&tsk->fs_excl));
70727-
70728+ /*
70729+ * Check this first since set_fs() below depends on
70730+ * current_thread_info(), which we better not access when we're in
70731+ * interrupt context. Other than that, we want to do the set_fs()
70732+ * as early as possible.
70733+ */
70734 if (unlikely(in_interrupt()))
70735 panic("Aiee, killing interrupt handler!");
70736- if (unlikely(!tsk->pid))
70737- panic("Attempted to kill the idle task!");
70738
70739 /*
70740- * If do_exit is called because this processes oopsed, it's possible
70741+ * If do_exit is called because this processes Oops'ed, it's possible
70742 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
70743 * continuing. Amongst other possible reasons, this is to prevent
70744 * mm_release()->clear_child_tid() from writing to a user-controlled
70745@@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
70746 */
70747 set_fs(USER_DS);
70748
70749+ profile_task_exit(tsk);
70750+
70751+ WARN_ON(atomic_read(&tsk->fs_excl));
70752+
70753+ if (unlikely(!tsk->pid))
70754+ panic("Attempted to kill the idle task!");
70755+
70756 tracehook_report_exit(&code);
70757
70758 validate_creds_for_do_exit(tsk);
70759@@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
70760 tsk->exit_code = code;
70761 taskstats_exit(tsk, group_dead);
70762
70763+ gr_acl_handle_psacct(tsk, code);
70764+ gr_acl_handle_exit();
70765+
70766 exit_mm(tsk);
70767
70768 if (group_dead)
70769@@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
70770
70771 if (unlikely(wo->wo_flags & WNOWAIT)) {
70772 int exit_code = p->exit_code;
70773- int why, status;
70774+ int why;
70775
70776 get_task_struct(p);
70777 read_unlock(&tasklist_lock);
70778diff --git a/kernel/fork.c b/kernel/fork.c
70779index 4bde56f..29a9bab 100644
70780--- a/kernel/fork.c
70781+++ b/kernel/fork.c
70782@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
70783 *stackend = STACK_END_MAGIC; /* for overflow detection */
70784
70785 #ifdef CONFIG_CC_STACKPROTECTOR
70786- tsk->stack_canary = get_random_int();
70787+ tsk->stack_canary = pax_get_random_long();
70788 #endif
70789
70790 /* One for us, one for whoever does the "release_task()" (usually parent) */
70791@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70792 mm->locked_vm = 0;
70793 mm->mmap = NULL;
70794 mm->mmap_cache = NULL;
70795- mm->free_area_cache = oldmm->mmap_base;
70796- mm->cached_hole_size = ~0UL;
70797+ mm->free_area_cache = oldmm->free_area_cache;
70798+ mm->cached_hole_size = oldmm->cached_hole_size;
70799 mm->map_count = 0;
70800 cpumask_clear(mm_cpumask(mm));
70801 mm->mm_rb = RB_ROOT;
70802@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70803 tmp->vm_flags &= ~VM_LOCKED;
70804 tmp->vm_mm = mm;
70805 tmp->vm_next = tmp->vm_prev = NULL;
70806+ tmp->vm_mirror = NULL;
70807 anon_vma_link(tmp);
70808 file = tmp->vm_file;
70809 if (file) {
70810@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
70811 if (retval)
70812 goto out;
70813 }
70814+
70815+#ifdef CONFIG_PAX_SEGMEXEC
70816+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
70817+ struct vm_area_struct *mpnt_m;
70818+
70819+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
70820+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
70821+
70822+ if (!mpnt->vm_mirror)
70823+ continue;
70824+
70825+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
70826+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
70827+ mpnt->vm_mirror = mpnt_m;
70828+ } else {
70829+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
70830+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
70831+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
70832+ mpnt->vm_mirror->vm_mirror = mpnt;
70833+ }
70834+ }
70835+ BUG_ON(mpnt_m);
70836+ }
70837+#endif
70838+
70839 /* a new mm has just been created */
70840 arch_dup_mmap(oldmm, mm);
70841 retval = 0;
70842@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
70843 write_unlock(&fs->lock);
70844 return -EAGAIN;
70845 }
70846- fs->users++;
70847+ atomic_inc(&fs->users);
70848 write_unlock(&fs->lock);
70849 return 0;
70850 }
70851 tsk->fs = copy_fs_struct(fs);
70852 if (!tsk->fs)
70853 return -ENOMEM;
70854+ gr_set_chroot_entries(tsk, &tsk->fs->root);
70855 return 0;
70856 }
70857
70858@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
70859 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
70860 #endif
70861 retval = -EAGAIN;
70862+
70863+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
70864+
70865 if (atomic_read(&p->real_cred->user->processes) >=
70866 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
70867- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
70868- p->real_cred->user != INIT_USER)
70869+ if (p->real_cred->user != INIT_USER &&
70870+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
70871 goto bad_fork_free;
70872 }
70873+ current->flags &= ~PF_NPROC_EXCEEDED;
70874
70875 retval = copy_creds(p, clone_flags);
70876 if (retval < 0)
70877@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
70878 goto bad_fork_free_pid;
70879 }
70880
70881+ gr_copy_label(p);
70882+
70883 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
70884 /*
70885 * Clear TID on mm_release()?
70886@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
70887 bad_fork_free:
70888 free_task(p);
70889 fork_out:
70890+ gr_log_forkfail(retval);
70891+
70892 return ERR_PTR(retval);
70893 }
70894
70895@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
70896 if (clone_flags & CLONE_PARENT_SETTID)
70897 put_user(nr, parent_tidptr);
70898
70899+ gr_handle_brute_check();
70900+
70901 if (clone_flags & CLONE_VFORK) {
70902 p->vfork_done = &vfork;
70903 init_completion(&vfork);
70904@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
70905 return 0;
70906
70907 /* don't need lock here; in the worst case we'll do useless copy */
70908- if (fs->users == 1)
70909+ if (atomic_read(&fs->users) == 1)
70910 return 0;
70911
70912 *new_fsp = copy_fs_struct(fs);
70913@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
70914 fs = current->fs;
70915 write_lock(&fs->lock);
70916 current->fs = new_fs;
70917- if (--fs->users)
70918+ gr_set_chroot_entries(current, &current->fs->root);
70919+ if (atomic_dec_return(&fs->users))
70920 new_fs = NULL;
70921 else
70922 new_fs = fs;
70923diff --git a/kernel/futex.c b/kernel/futex.c
70924index fb98c9f..f158c0c 100644
70925--- a/kernel/futex.c
70926+++ b/kernel/futex.c
70927@@ -54,6 +54,7 @@
70928 #include <linux/mount.h>
70929 #include <linux/pagemap.h>
70930 #include <linux/syscalls.h>
70931+#include <linux/ptrace.h>
70932 #include <linux/signal.h>
70933 #include <linux/module.h>
70934 #include <linux/magic.h>
70935@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
70936 struct page *page;
70937 int err, ro = 0;
70938
70939+#ifdef CONFIG_PAX_SEGMEXEC
70940+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
70941+ return -EFAULT;
70942+#endif
70943+
70944 /*
70945 * The futex address must be "naturally" aligned.
70946 */
70947@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
70948 struct futex_q q;
70949 int ret;
70950
70951+ pax_track_stack();
70952+
70953 if (!bitset)
70954 return -EINVAL;
70955
70956@@ -1871,7 +1879,7 @@ retry:
70957
70958 restart = &current_thread_info()->restart_block;
70959 restart->fn = futex_wait_restart;
70960- restart->futex.uaddr = (u32 *)uaddr;
70961+ restart->futex.uaddr = uaddr;
70962 restart->futex.val = val;
70963 restart->futex.time = abs_time->tv64;
70964 restart->futex.bitset = bitset;
70965@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
70966 struct futex_q q;
70967 int res, ret;
70968
70969+ pax_track_stack();
70970+
70971 if (!bitset)
70972 return -EINVAL;
70973
70974@@ -2407,7 +2417,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
70975 {
70976 struct robust_list_head __user *head;
70977 unsigned long ret;
70978+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
70979 const struct cred *cred = current_cred(), *pcred;
70980+#endif
70981
70982 if (!futex_cmpxchg_enabled)
70983 return -ENOSYS;
70984@@ -2423,11 +2435,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
70985 if (!p)
70986 goto err_unlock;
70987 ret = -EPERM;
70988+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70989+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
70990+ goto err_unlock;
70991+#else
70992 pcred = __task_cred(p);
70993 if (cred->euid != pcred->euid &&
70994 cred->euid != pcred->uid &&
70995 !capable(CAP_SYS_PTRACE))
70996 goto err_unlock;
70997+#endif
70998 head = p->robust_list;
70999 rcu_read_unlock();
71000 }
71001@@ -2489,7 +2506,7 @@ retry:
71002 */
71003 static inline int fetch_robust_entry(struct robust_list __user **entry,
71004 struct robust_list __user * __user *head,
71005- int *pi)
71006+ unsigned int *pi)
71007 {
71008 unsigned long uentry;
71009
71010@@ -2670,6 +2687,7 @@ static int __init futex_init(void)
71011 {
71012 u32 curval;
71013 int i;
71014+ mm_segment_t oldfs;
71015
71016 /*
71017 * This will fail and we want it. Some arch implementations do
71018@@ -2681,7 +2699,10 @@ static int __init futex_init(void)
71019 * implementation, the non functional ones will return
71020 * -ENOSYS.
71021 */
71022+ oldfs = get_fs();
71023+ set_fs(USER_DS);
71024 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
71025+ set_fs(oldfs);
71026 if (curval == -EFAULT)
71027 futex_cmpxchg_enabled = 1;
71028
71029diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
71030index 2357165..8d70cee 100644
71031--- a/kernel/futex_compat.c
71032+++ b/kernel/futex_compat.c
71033@@ -10,6 +10,7 @@
71034 #include <linux/compat.h>
71035 #include <linux/nsproxy.h>
71036 #include <linux/futex.h>
71037+#include <linux/ptrace.h>
71038
71039 #include <asm/uaccess.h>
71040
71041@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71042 {
71043 struct compat_robust_list_head __user *head;
71044 unsigned long ret;
71045- const struct cred *cred = current_cred(), *pcred;
71046+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
71047+ const struct cred *cred = current_cred();
71048+ const struct cred *pcred;
71049+#endif
71050
71051 if (!futex_cmpxchg_enabled)
71052 return -ENOSYS;
71053@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71054 if (!p)
71055 goto err_unlock;
71056 ret = -EPERM;
71057+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71058+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
71059+ goto err_unlock;
71060+#else
71061 pcred = __task_cred(p);
71062 if (cred->euid != pcred->euid &&
71063 cred->euid != pcred->uid &&
71064 !capable(CAP_SYS_PTRACE))
71065 goto err_unlock;
71066+#endif
71067 head = p->compat_robust_list;
71068 read_unlock(&tasklist_lock);
71069 }
71070diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
71071index 9b22d03..6295b62 100644
71072--- a/kernel/gcov/base.c
71073+++ b/kernel/gcov/base.c
71074@@ -102,11 +102,6 @@ void gcov_enable_events(void)
71075 }
71076
71077 #ifdef CONFIG_MODULES
71078-static inline int within(void *addr, void *start, unsigned long size)
71079-{
71080- return ((addr >= start) && (addr < start + size));
71081-}
71082-
71083 /* Update list and generate events when modules are unloaded. */
71084 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71085 void *data)
71086@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71087 prev = NULL;
71088 /* Remove entries located in module from linked list. */
71089 for (info = gcov_info_head; info; info = info->next) {
71090- if (within(info, mod->module_core, mod->core_size)) {
71091+ if (within_module_core_rw((unsigned long)info, mod)) {
71092 if (prev)
71093 prev->next = info->next;
71094 else
71095diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
71096index a6e9d00..a0da4f9 100644
71097--- a/kernel/hrtimer.c
71098+++ b/kernel/hrtimer.c
71099@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
71100 local_irq_restore(flags);
71101 }
71102
71103-static void run_hrtimer_softirq(struct softirq_action *h)
71104+static void run_hrtimer_softirq(void)
71105 {
71106 hrtimer_peek_ahead_timers();
71107 }
71108diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
71109index 8b6b8b6..6bc87df 100644
71110--- a/kernel/kallsyms.c
71111+++ b/kernel/kallsyms.c
71112@@ -11,6 +11,9 @@
71113 * Changed the compression method from stem compression to "table lookup"
71114 * compression (see scripts/kallsyms.c for a more complete description)
71115 */
71116+#ifdef CONFIG_GRKERNSEC_HIDESYM
71117+#define __INCLUDED_BY_HIDESYM 1
71118+#endif
71119 #include <linux/kallsyms.h>
71120 #include <linux/module.h>
71121 #include <linux/init.h>
71122@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
71123
71124 static inline int is_kernel_inittext(unsigned long addr)
71125 {
71126+ if (system_state != SYSTEM_BOOTING)
71127+ return 0;
71128+
71129 if (addr >= (unsigned long)_sinittext
71130 && addr <= (unsigned long)_einittext)
71131 return 1;
71132 return 0;
71133 }
71134
71135+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71136+#ifdef CONFIG_MODULES
71137+static inline int is_module_text(unsigned long addr)
71138+{
71139+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
71140+ return 1;
71141+
71142+ addr = ktla_ktva(addr);
71143+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
71144+}
71145+#else
71146+static inline int is_module_text(unsigned long addr)
71147+{
71148+ return 0;
71149+}
71150+#endif
71151+#endif
71152+
71153 static inline int is_kernel_text(unsigned long addr)
71154 {
71155 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
71156@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
71157
71158 static inline int is_kernel(unsigned long addr)
71159 {
71160+
71161+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71162+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
71163+ return 1;
71164+
71165+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
71166+#else
71167 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
71168+#endif
71169+
71170 return 1;
71171 return in_gate_area_no_task(addr);
71172 }
71173
71174 static int is_ksym_addr(unsigned long addr)
71175 {
71176+
71177+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71178+ if (is_module_text(addr))
71179+ return 0;
71180+#endif
71181+
71182 if (all_var)
71183 return is_kernel(addr);
71184
71185@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
71186
71187 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
71188 {
71189- iter->name[0] = '\0';
71190 iter->nameoff = get_symbol_offset(new_pos);
71191 iter->pos = new_pos;
71192 }
71193@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
71194 {
71195 struct kallsym_iter *iter = m->private;
71196
71197+#ifdef CONFIG_GRKERNSEC_HIDESYM
71198+ if (current_uid())
71199+ return 0;
71200+#endif
71201+
71202 /* Some debugging symbols have no name. Ignore them. */
71203 if (!iter->name[0])
71204 return 0;
71205@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
71206 struct kallsym_iter *iter;
71207 int ret;
71208
71209- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
71210+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
71211 if (!iter)
71212 return -ENOMEM;
71213 reset_iter(iter, 0);
71214diff --git a/kernel/kexec.c b/kernel/kexec.c
71215index f336e21..9c1c20b 100644
71216--- a/kernel/kexec.c
71217+++ b/kernel/kexec.c
71218@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
71219 unsigned long flags)
71220 {
71221 struct compat_kexec_segment in;
71222- struct kexec_segment out, __user *ksegments;
71223+ struct kexec_segment out;
71224+ struct kexec_segment __user *ksegments;
71225 unsigned long i, result;
71226
71227 /* Don't allow clients that don't understand the native
71228diff --git a/kernel/kgdb.c b/kernel/kgdb.c
71229index 53dae4b..9ba3743 100644
71230--- a/kernel/kgdb.c
71231+++ b/kernel/kgdb.c
71232@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
71233 /* Guard for recursive entry */
71234 static int exception_level;
71235
71236-static struct kgdb_io *kgdb_io_ops;
71237+static const struct kgdb_io *kgdb_io_ops;
71238 static DEFINE_SPINLOCK(kgdb_registration_lock);
71239
71240 /* kgdb console driver is loaded */
71241@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
71242 */
71243 static atomic_t passive_cpu_wait[NR_CPUS];
71244 static atomic_t cpu_in_kgdb[NR_CPUS];
71245-atomic_t kgdb_setting_breakpoint;
71246+atomic_unchecked_t kgdb_setting_breakpoint;
71247
71248 struct task_struct *kgdb_usethread;
71249 struct task_struct *kgdb_contthread;
71250@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
71251 sizeof(unsigned long)];
71252
71253 /* to keep track of the CPU which is doing the single stepping*/
71254-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71255+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71256
71257 /*
71258 * If you are debugging a problem where roundup (the collection of
71259@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
71260 return 0;
71261 if (kgdb_connected)
71262 return 1;
71263- if (atomic_read(&kgdb_setting_breakpoint))
71264+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
71265 return 1;
71266 if (print_wait)
71267 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
71268@@ -1426,8 +1426,8 @@ acquirelock:
71269 * instance of the exception handler wanted to come into the
71270 * debugger on a different CPU via a single step
71271 */
71272- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
71273- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
71274+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
71275+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
71276
71277 atomic_set(&kgdb_active, -1);
71278 touch_softlockup_watchdog();
71279@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
71280 *
71281 * Register it with the KGDB core.
71282 */
71283-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
71284+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
71285 {
71286 int err;
71287
71288@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
71289 *
71290 * Unregister it with the KGDB core.
71291 */
71292-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
71293+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
71294 {
71295 BUG_ON(kgdb_connected);
71296
71297@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
71298 */
71299 void kgdb_breakpoint(void)
71300 {
71301- atomic_set(&kgdb_setting_breakpoint, 1);
71302+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
71303 wmb(); /* Sync point before breakpoint */
71304 arch_kgdb_breakpoint();
71305 wmb(); /* Sync point after breakpoint */
71306- atomic_set(&kgdb_setting_breakpoint, 0);
71307+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
71308 }
71309 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
71310
71311diff --git a/kernel/kmod.c b/kernel/kmod.c
71312index d206078..e27ba6a 100644
71313--- a/kernel/kmod.c
71314+++ b/kernel/kmod.c
71315@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
71316 * If module auto-loading support is disabled then this function
71317 * becomes a no-operation.
71318 */
71319-int __request_module(bool wait, const char *fmt, ...)
71320+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
71321 {
71322- va_list args;
71323 char module_name[MODULE_NAME_LEN];
71324 unsigned int max_modprobes;
71325 int ret;
71326- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
71327+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
71328 static char *envp[] = { "HOME=/",
71329 "TERM=linux",
71330 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
71331@@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
71332 if (ret)
71333 return ret;
71334
71335- va_start(args, fmt);
71336- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
71337- va_end(args);
71338+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
71339 if (ret >= MODULE_NAME_LEN)
71340 return -ENAMETOOLONG;
71341
71342+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71343+ if (!current_uid()) {
71344+ /* hack to workaround consolekit/udisks stupidity */
71345+ read_lock(&tasklist_lock);
71346+ if (!strcmp(current->comm, "mount") &&
71347+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
71348+ read_unlock(&tasklist_lock);
71349+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
71350+ return -EPERM;
71351+ }
71352+ read_unlock(&tasklist_lock);
71353+ }
71354+#endif
71355+
71356 /* If modprobe needs a service that is in a module, we get a recursive
71357 * loop. Limit the number of running kmod threads to max_threads/2 or
71358 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
71359@@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
71360 atomic_dec(&kmod_concurrent);
71361 return ret;
71362 }
71363+
71364+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
71365+{
71366+ va_list args;
71367+ int ret;
71368+
71369+ va_start(args, fmt);
71370+ ret = ____request_module(wait, module_param, fmt, args);
71371+ va_end(args);
71372+
71373+ return ret;
71374+}
71375+
71376+int __request_module(bool wait, const char *fmt, ...)
71377+{
71378+ va_list args;
71379+ int ret;
71380+
71381+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71382+ if (current_uid()) {
71383+ char module_param[MODULE_NAME_LEN];
71384+
71385+ memset(module_param, 0, sizeof(module_param));
71386+
71387+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
71388+
71389+ va_start(args, fmt);
71390+ ret = ____request_module(wait, module_param, fmt, args);
71391+ va_end(args);
71392+
71393+ return ret;
71394+ }
71395+#endif
71396+
71397+ va_start(args, fmt);
71398+ ret = ____request_module(wait, NULL, fmt, args);
71399+ va_end(args);
71400+
71401+ return ret;
71402+}
71403+
71404+
71405 EXPORT_SYMBOL(__request_module);
71406 #endif /* CONFIG_MODULES */
71407
71408@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
71409 *
71410 * Thus the __user pointer cast is valid here.
71411 */
71412- sys_wait4(pid, (int __user *)&ret, 0, NULL);
71413+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
71414
71415 /*
71416 * If ret is 0, either ____call_usermodehelper failed and the
71417diff --git a/kernel/kprobes.c b/kernel/kprobes.c
71418index 5240d75..5a6fb33 100644
71419--- a/kernel/kprobes.c
71420+++ b/kernel/kprobes.c
71421@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
71422 * kernel image and loaded module images reside. This is required
71423 * so x86_64 can correctly handle the %rip-relative fixups.
71424 */
71425- kip->insns = module_alloc(PAGE_SIZE);
71426+ kip->insns = module_alloc_exec(PAGE_SIZE);
71427 if (!kip->insns) {
71428 kfree(kip);
71429 return NULL;
71430@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
71431 */
71432 if (!list_is_singular(&kprobe_insn_pages)) {
71433 list_del(&kip->list);
71434- module_free(NULL, kip->insns);
71435+ module_free_exec(NULL, kip->insns);
71436 kfree(kip);
71437 }
71438 return 1;
71439@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
71440 {
71441 int i, err = 0;
71442 unsigned long offset = 0, size = 0;
71443- char *modname, namebuf[128];
71444+ char *modname, namebuf[KSYM_NAME_LEN];
71445 const char *symbol_name;
71446 void *addr;
71447 struct kprobe_blackpoint *kb;
71448@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
71449 const char *sym = NULL;
71450 unsigned int i = *(loff_t *) v;
71451 unsigned long offset = 0;
71452- char *modname, namebuf[128];
71453+ char *modname, namebuf[KSYM_NAME_LEN];
71454
71455 head = &kprobe_table[i];
71456 preempt_disable();
71457diff --git a/kernel/lockdep.c b/kernel/lockdep.c
71458index d86fe89..d12fc66 100644
71459--- a/kernel/lockdep.c
71460+++ b/kernel/lockdep.c
71461@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
71462 /*
71463 * Various lockdep statistics:
71464 */
71465-atomic_t chain_lookup_hits;
71466-atomic_t chain_lookup_misses;
71467-atomic_t hardirqs_on_events;
71468-atomic_t hardirqs_off_events;
71469-atomic_t redundant_hardirqs_on;
71470-atomic_t redundant_hardirqs_off;
71471-atomic_t softirqs_on_events;
71472-atomic_t softirqs_off_events;
71473-atomic_t redundant_softirqs_on;
71474-atomic_t redundant_softirqs_off;
71475-atomic_t nr_unused_locks;
71476-atomic_t nr_cyclic_checks;
71477-atomic_t nr_find_usage_forwards_checks;
71478-atomic_t nr_find_usage_backwards_checks;
71479+atomic_unchecked_t chain_lookup_hits;
71480+atomic_unchecked_t chain_lookup_misses;
71481+atomic_unchecked_t hardirqs_on_events;
71482+atomic_unchecked_t hardirqs_off_events;
71483+atomic_unchecked_t redundant_hardirqs_on;
71484+atomic_unchecked_t redundant_hardirqs_off;
71485+atomic_unchecked_t softirqs_on_events;
71486+atomic_unchecked_t softirqs_off_events;
71487+atomic_unchecked_t redundant_softirqs_on;
71488+atomic_unchecked_t redundant_softirqs_off;
71489+atomic_unchecked_t nr_unused_locks;
71490+atomic_unchecked_t nr_cyclic_checks;
71491+atomic_unchecked_t nr_find_usage_forwards_checks;
71492+atomic_unchecked_t nr_find_usage_backwards_checks;
71493 #endif
71494
71495 /*
71496@@ -577,6 +577,10 @@ static int static_obj(void *obj)
71497 int i;
71498 #endif
71499
71500+#ifdef CONFIG_PAX_KERNEXEC
71501+ start = ktla_ktva(start);
71502+#endif
71503+
71504 /*
71505 * static variable?
71506 */
71507@@ -592,8 +596,7 @@ static int static_obj(void *obj)
71508 */
71509 for_each_possible_cpu(i) {
71510 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
71511- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
71512- + per_cpu_offset(i);
71513+ end = start + PERCPU_ENOUGH_ROOM;
71514
71515 if ((addr >= start) && (addr < end))
71516 return 1;
71517@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
71518 if (!static_obj(lock->key)) {
71519 debug_locks_off();
71520 printk("INFO: trying to register non-static key.\n");
71521+ printk("lock:%pS key:%pS.\n", lock, lock->key);
71522 printk("the code is fine but needs lockdep annotation.\n");
71523 printk("turning off the locking correctness validator.\n");
71524 dump_stack();
71525@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
71526 if (!class)
71527 return 0;
71528 }
71529- debug_atomic_inc((atomic_t *)&class->ops);
71530+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
71531 if (very_verbose(class)) {
71532 printk("\nacquire class [%p] %s", class->key, class->name);
71533 if (class->name_version > 1)
71534diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
71535index a2ee95a..092f0f2 100644
71536--- a/kernel/lockdep_internals.h
71537+++ b/kernel/lockdep_internals.h
71538@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
71539 /*
71540 * Various lockdep statistics:
71541 */
71542-extern atomic_t chain_lookup_hits;
71543-extern atomic_t chain_lookup_misses;
71544-extern atomic_t hardirqs_on_events;
71545-extern atomic_t hardirqs_off_events;
71546-extern atomic_t redundant_hardirqs_on;
71547-extern atomic_t redundant_hardirqs_off;
71548-extern atomic_t softirqs_on_events;
71549-extern atomic_t softirqs_off_events;
71550-extern atomic_t redundant_softirqs_on;
71551-extern atomic_t redundant_softirqs_off;
71552-extern atomic_t nr_unused_locks;
71553-extern atomic_t nr_cyclic_checks;
71554-extern atomic_t nr_cyclic_check_recursions;
71555-extern atomic_t nr_find_usage_forwards_checks;
71556-extern atomic_t nr_find_usage_forwards_recursions;
71557-extern atomic_t nr_find_usage_backwards_checks;
71558-extern atomic_t nr_find_usage_backwards_recursions;
71559-# define debug_atomic_inc(ptr) atomic_inc(ptr)
71560-# define debug_atomic_dec(ptr) atomic_dec(ptr)
71561-# define debug_atomic_read(ptr) atomic_read(ptr)
71562+extern atomic_unchecked_t chain_lookup_hits;
71563+extern atomic_unchecked_t chain_lookup_misses;
71564+extern atomic_unchecked_t hardirqs_on_events;
71565+extern atomic_unchecked_t hardirqs_off_events;
71566+extern atomic_unchecked_t redundant_hardirqs_on;
71567+extern atomic_unchecked_t redundant_hardirqs_off;
71568+extern atomic_unchecked_t softirqs_on_events;
71569+extern atomic_unchecked_t softirqs_off_events;
71570+extern atomic_unchecked_t redundant_softirqs_on;
71571+extern atomic_unchecked_t redundant_softirqs_off;
71572+extern atomic_unchecked_t nr_unused_locks;
71573+extern atomic_unchecked_t nr_cyclic_checks;
71574+extern atomic_unchecked_t nr_cyclic_check_recursions;
71575+extern atomic_unchecked_t nr_find_usage_forwards_checks;
71576+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
71577+extern atomic_unchecked_t nr_find_usage_backwards_checks;
71578+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
71579+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
71580+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
71581+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
71582 #else
71583 # define debug_atomic_inc(ptr) do { } while (0)
71584 # define debug_atomic_dec(ptr) do { } while (0)
71585diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
71586index d4aba4f..02a353f 100644
71587--- a/kernel/lockdep_proc.c
71588+++ b/kernel/lockdep_proc.c
71589@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
71590
71591 static void print_name(struct seq_file *m, struct lock_class *class)
71592 {
71593- char str[128];
71594+ char str[KSYM_NAME_LEN];
71595 const char *name = class->name;
71596
71597 if (!name) {
71598diff --git a/kernel/module.c b/kernel/module.c
71599index 4b270e6..2226274 100644
71600--- a/kernel/module.c
71601+++ b/kernel/module.c
71602@@ -55,6 +55,7 @@
71603 #include <linux/async.h>
71604 #include <linux/percpu.h>
71605 #include <linux/kmemleak.h>
71606+#include <linux/grsecurity.h>
71607
71608 #define CREATE_TRACE_POINTS
71609 #include <trace/events/module.h>
71610@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
71611 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
71612
71613 /* Bounds of module allocation, for speeding __module_address */
71614-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
71615+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
71616+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
71617
71618 int register_module_notifier(struct notifier_block * nb)
71619 {
71620@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
71621 return true;
71622
71623 list_for_each_entry_rcu(mod, &modules, list) {
71624- struct symsearch arr[] = {
71625+ struct symsearch modarr[] = {
71626 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
71627 NOT_GPL_ONLY, false },
71628 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
71629@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
71630 #endif
71631 };
71632
71633- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
71634+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
71635 return true;
71636 }
71637 return false;
71638@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
71639 void *ptr;
71640 int cpu;
71641
71642- if (align > PAGE_SIZE) {
71643+ if (align-1 >= PAGE_SIZE) {
71644 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
71645 name, align, PAGE_SIZE);
71646 align = PAGE_SIZE;
71647@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
71648 * /sys/module/foo/sections stuff
71649 * J. Corbet <corbet@lwn.net>
71650 */
71651-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
71652+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71653
71654 static inline bool sect_empty(const Elf_Shdr *sect)
71655 {
71656@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
71657 destroy_params(mod->kp, mod->num_kp);
71658
71659 /* This may be NULL, but that's OK */
71660- module_free(mod, mod->module_init);
71661+ module_free(mod, mod->module_init_rw);
71662+ module_free_exec(mod, mod->module_init_rx);
71663 kfree(mod->args);
71664 if (mod->percpu)
71665 percpu_modfree(mod->percpu);
71666@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
71667 percpu_modfree(mod->refptr);
71668 #endif
71669 /* Free lock-classes: */
71670- lockdep_free_key_range(mod->module_core, mod->core_size);
71671+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
71672+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
71673
71674 /* Finally, free the core (containing the module structure) */
71675- module_free(mod, mod->module_core);
71676+ module_free_exec(mod, mod->module_core_rx);
71677+ module_free(mod, mod->module_core_rw);
71678
71679 #ifdef CONFIG_MPU
71680 update_protections(current->mm);
71681@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71682 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
71683 int ret = 0;
71684 const struct kernel_symbol *ksym;
71685+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71686+ int is_fs_load = 0;
71687+ int register_filesystem_found = 0;
71688+ char *p;
71689+
71690+ p = strstr(mod->args, "grsec_modharden_fs");
71691+
71692+ if (p) {
71693+ char *endptr = p + strlen("grsec_modharden_fs");
71694+ /* copy \0 as well */
71695+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
71696+ is_fs_load = 1;
71697+ }
71698+#endif
71699+
71700
71701 for (i = 1; i < n; i++) {
71702+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71703+ const char *name = strtab + sym[i].st_name;
71704+
71705+ /* it's a real shame this will never get ripped and copied
71706+ upstream! ;(
71707+ */
71708+ if (is_fs_load && !strcmp(name, "register_filesystem"))
71709+ register_filesystem_found = 1;
71710+#endif
71711 switch (sym[i].st_shndx) {
71712 case SHN_COMMON:
71713 /* We compiled with -fno-common. These are not
71714@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71715 strtab + sym[i].st_name, mod);
71716 /* Ok if resolved. */
71717 if (ksym) {
71718+ pax_open_kernel();
71719 sym[i].st_value = ksym->value;
71720+ pax_close_kernel();
71721 break;
71722 }
71723
71724@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
71725 secbase = (unsigned long)mod->percpu;
71726 else
71727 secbase = sechdrs[sym[i].st_shndx].sh_addr;
71728+ pax_open_kernel();
71729 sym[i].st_value += secbase;
71730+ pax_close_kernel();
71731 break;
71732 }
71733 }
71734
71735+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71736+ if (is_fs_load && !register_filesystem_found) {
71737+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
71738+ ret = -EPERM;
71739+ }
71740+#endif
71741+
71742 return ret;
71743 }
71744
71745@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
71746 || s->sh_entsize != ~0UL
71747 || strstarts(secstrings + s->sh_name, ".init"))
71748 continue;
71749- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
71750+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
71751+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
71752+ else
71753+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
71754 DEBUGP("\t%s\n", secstrings + s->sh_name);
71755 }
71756- if (m == 0)
71757- mod->core_text_size = mod->core_size;
71758 }
71759
71760 DEBUGP("Init section allocation order:\n");
71761@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
71762 || s->sh_entsize != ~0UL
71763 || !strstarts(secstrings + s->sh_name, ".init"))
71764 continue;
71765- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
71766- | INIT_OFFSET_MASK);
71767+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
71768+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
71769+ else
71770+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
71771+ s->sh_entsize |= INIT_OFFSET_MASK;
71772 DEBUGP("\t%s\n", secstrings + s->sh_name);
71773 }
71774- if (m == 0)
71775- mod->init_text_size = mod->init_size;
71776 }
71777 }
71778
71779@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
71780
71781 /* As per nm */
71782 static char elf_type(const Elf_Sym *sym,
71783- Elf_Shdr *sechdrs,
71784- const char *secstrings,
71785- struct module *mod)
71786+ const Elf_Shdr *sechdrs,
71787+ const char *secstrings)
71788 {
71789 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
71790 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
71791@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
71792
71793 /* Put symbol section at end of init part of module. */
71794 symsect->sh_flags |= SHF_ALLOC;
71795- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
71796+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
71797 symindex) | INIT_OFFSET_MASK;
71798 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
71799
71800@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
71801 }
71802
71803 /* Append room for core symbols at end of core part. */
71804- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
71805- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
71806+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
71807+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
71808
71809 /* Put string table section at end of init part of module. */
71810 strsect->sh_flags |= SHF_ALLOC;
71811- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
71812+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
71813 strindex) | INIT_OFFSET_MASK;
71814 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
71815
71816 /* Append room for core symbols' strings at end of core part. */
71817- *pstroffs = mod->core_size;
71818+ *pstroffs = mod->core_size_rx;
71819 __set_bit(0, strmap);
71820- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
71821+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
71822
71823 return symoffs;
71824 }
71825@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
71826 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
71827 mod->strtab = (void *)sechdrs[strindex].sh_addr;
71828
71829+ pax_open_kernel();
71830+
71831 /* Set types up while we still have access to sections. */
71832 for (i = 0; i < mod->num_symtab; i++)
71833 mod->symtab[i].st_info
71834- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
71835+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
71836
71837- mod->core_symtab = dst = mod->module_core + symoffs;
71838+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
71839 src = mod->symtab;
71840 *dst = *src;
71841 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
71842@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
71843 }
71844 mod->core_num_syms = ndst;
71845
71846- mod->core_strtab = s = mod->module_core + stroffs;
71847+ mod->core_strtab = s = mod->module_core_rx + stroffs;
71848 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
71849 if (test_bit(i, strmap))
71850 *++s = mod->strtab[i];
71851+
71852+ pax_close_kernel();
71853 }
71854 #else
71855 static inline unsigned long layout_symtab(struct module *mod,
71856@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
71857 #endif
71858 }
71859
71860-static void *module_alloc_update_bounds(unsigned long size)
71861+static void *module_alloc_update_bounds_rw(unsigned long size)
71862 {
71863 void *ret = module_alloc(size);
71864
71865 if (ret) {
71866 /* Update module bounds. */
71867- if ((unsigned long)ret < module_addr_min)
71868- module_addr_min = (unsigned long)ret;
71869- if ((unsigned long)ret + size > module_addr_max)
71870- module_addr_max = (unsigned long)ret + size;
71871+ if ((unsigned long)ret < module_addr_min_rw)
71872+ module_addr_min_rw = (unsigned long)ret;
71873+ if ((unsigned long)ret + size > module_addr_max_rw)
71874+ module_addr_max_rw = (unsigned long)ret + size;
71875+ }
71876+ return ret;
71877+}
71878+
71879+static void *module_alloc_update_bounds_rx(unsigned long size)
71880+{
71881+ void *ret = module_alloc_exec(size);
71882+
71883+ if (ret) {
71884+ /* Update module bounds. */
71885+ if ((unsigned long)ret < module_addr_min_rx)
71886+ module_addr_min_rx = (unsigned long)ret;
71887+ if ((unsigned long)ret + size > module_addr_max_rx)
71888+ module_addr_max_rx = (unsigned long)ret + size;
71889 }
71890 return ret;
71891 }
71892@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
71893 unsigned int i;
71894
71895 /* only scan the sections containing data */
71896- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
71897- (unsigned long)mod->module_core,
71898+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
71899+ (unsigned long)mod->module_core_rw,
71900 sizeof(struct module), GFP_KERNEL);
71901
71902 for (i = 1; i < hdr->e_shnum; i++) {
71903@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
71904 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
71905 continue;
71906
71907- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
71908- (unsigned long)mod->module_core,
71909+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
71910+ (unsigned long)mod->module_core_rw,
71911 sechdrs[i].sh_size, GFP_KERNEL);
71912 }
71913 }
71914@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
71915 Elf_Ehdr *hdr;
71916 Elf_Shdr *sechdrs;
71917 char *secstrings, *args, *modmagic, *strtab = NULL;
71918- char *staging;
71919+ char *staging, *license;
71920 unsigned int i;
71921 unsigned int symindex = 0;
71922 unsigned int strindex = 0;
71923@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
71924 goto free_hdr;
71925 }
71926
71927+ license = get_modinfo(sechdrs, infoindex, "license");
71928+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
71929+ if (!license || !license_is_gpl_compatible(license)) {
71930+ err = -ENOEXEC;
71931+ goto free_hdr;
71932+ }
71933+#endif
71934+
71935 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
71936 /* This is allowed: modprobe --force will invalidate it. */
71937 if (!modmagic) {
71938@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
71939 secstrings, &stroffs, strmap);
71940
71941 /* Do the allocs. */
71942- ptr = module_alloc_update_bounds(mod->core_size);
71943+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
71944 /*
71945 * The pointer to this block is stored in the module structure
71946 * which is inside the block. Just mark it as not being a
71947@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
71948 err = -ENOMEM;
71949 goto free_percpu;
71950 }
71951- memset(ptr, 0, mod->core_size);
71952- mod->module_core = ptr;
71953+ memset(ptr, 0, mod->core_size_rw);
71954+ mod->module_core_rw = ptr;
71955
71956- ptr = module_alloc_update_bounds(mod->init_size);
71957+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
71958 /*
71959 * The pointer to this block is stored in the module structure
71960 * which is inside the block. This block doesn't need to be
71961 * scanned as it contains data and code that will be freed
71962 * after the module is initialized.
71963 */
71964- kmemleak_ignore(ptr);
71965- if (!ptr && mod->init_size) {
71966+ kmemleak_not_leak(ptr);
71967+ if (!ptr && mod->init_size_rw) {
71968+ err = -ENOMEM;
71969+ goto free_core_rw;
71970+ }
71971+ memset(ptr, 0, mod->init_size_rw);
71972+ mod->module_init_rw = ptr;
71973+
71974+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
71975+ kmemleak_not_leak(ptr);
71976+ if (!ptr) {
71977+ err = -ENOMEM;
71978+ goto free_init_rw;
71979+ }
71980+
71981+ pax_open_kernel();
71982+ memset(ptr, 0, mod->core_size_rx);
71983+ pax_close_kernel();
71984+ mod->module_core_rx = ptr;
71985+
71986+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
71987+ kmemleak_not_leak(ptr);
71988+ if (!ptr && mod->init_size_rx) {
71989 err = -ENOMEM;
71990- goto free_core;
71991+ goto free_core_rx;
71992 }
71993- memset(ptr, 0, mod->init_size);
71994- mod->module_init = ptr;
71995+
71996+ pax_open_kernel();
71997+ memset(ptr, 0, mod->init_size_rx);
71998+ pax_close_kernel();
71999+ mod->module_init_rx = ptr;
72000
72001 /* Transfer each section which specifies SHF_ALLOC */
72002 DEBUGP("final section addresses:\n");
72003@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
72004 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
72005 continue;
72006
72007- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
72008- dest = mod->module_init
72009- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72010- else
72011- dest = mod->module_core + sechdrs[i].sh_entsize;
72012+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
72013+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
72014+ dest = mod->module_init_rw
72015+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72016+ else
72017+ dest = mod->module_init_rx
72018+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72019+ } else {
72020+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
72021+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
72022+ else
72023+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
72024+ }
72025
72026- if (sechdrs[i].sh_type != SHT_NOBITS)
72027- memcpy(dest, (void *)sechdrs[i].sh_addr,
72028- sechdrs[i].sh_size);
72029+ if (sechdrs[i].sh_type != SHT_NOBITS) {
72030+
72031+#ifdef CONFIG_PAX_KERNEXEC
72032+#ifdef CONFIG_X86_64
72033+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
72034+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
72035+#endif
72036+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
72037+ pax_open_kernel();
72038+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72039+ pax_close_kernel();
72040+ } else
72041+#endif
72042+
72043+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72044+ }
72045 /* Update sh_addr to point to copy in image. */
72046- sechdrs[i].sh_addr = (unsigned long)dest;
72047+
72048+#ifdef CONFIG_PAX_KERNEXEC
72049+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
72050+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
72051+ else
72052+#endif
72053+
72054+ sechdrs[i].sh_addr = (unsigned long)dest;
72055 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
72056 }
72057 /* Module has been moved. */
72058@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
72059 mod->name);
72060 if (!mod->refptr) {
72061 err = -ENOMEM;
72062- goto free_init;
72063+ goto free_init_rx;
72064 }
72065 #endif
72066 /* Now we've moved module, initialize linked lists, etc. */
72067@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
72068 goto free_unload;
72069
72070 /* Set up license info based on the info section */
72071- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
72072+ set_license(mod, license);
72073
72074 /*
72075 * ndiswrapper is under GPL by itself, but loads proprietary modules.
72076@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
72077 /* Set up MODINFO_ATTR fields */
72078 setup_modinfo(mod, sechdrs, infoindex);
72079
72080+ mod->args = args;
72081+
72082+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72083+ {
72084+ char *p, *p2;
72085+
72086+ if (strstr(mod->args, "grsec_modharden_netdev")) {
72087+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
72088+ err = -EPERM;
72089+ goto cleanup;
72090+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
72091+ p += strlen("grsec_modharden_normal");
72092+ p2 = strstr(p, "_");
72093+ if (p2) {
72094+ *p2 = '\0';
72095+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
72096+ *p2 = '_';
72097+ }
72098+ err = -EPERM;
72099+ goto cleanup;
72100+ }
72101+ }
72102+#endif
72103+
72104+
72105 /* Fix up syms, so that st_value is a pointer to location. */
72106 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
72107 mod);
72108@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
72109
72110 /* Now do relocations. */
72111 for (i = 1; i < hdr->e_shnum; i++) {
72112- const char *strtab = (char *)sechdrs[strindex].sh_addr;
72113 unsigned int info = sechdrs[i].sh_info;
72114+ strtab = (char *)sechdrs[strindex].sh_addr;
72115
72116 /* Not a valid relocation section? */
72117 if (info >= hdr->e_shnum)
72118@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
72119 * Do it before processing of module parameters, so the module
72120 * can provide parameter accessor functions of its own.
72121 */
72122- if (mod->module_init)
72123- flush_icache_range((unsigned long)mod->module_init,
72124- (unsigned long)mod->module_init
72125- + mod->init_size);
72126- flush_icache_range((unsigned long)mod->module_core,
72127- (unsigned long)mod->module_core + mod->core_size);
72128+ if (mod->module_init_rx)
72129+ flush_icache_range((unsigned long)mod->module_init_rx,
72130+ (unsigned long)mod->module_init_rx
72131+ + mod->init_size_rx);
72132+ flush_icache_range((unsigned long)mod->module_core_rx,
72133+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
72134
72135 set_fs(old_fs);
72136
72137- mod->args = args;
72138 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
72139 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
72140 mod->name);
72141@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
72142 free_unload:
72143 module_unload_free(mod);
72144 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
72145+ free_init_rx:
72146 percpu_modfree(mod->refptr);
72147- free_init:
72148 #endif
72149- module_free(mod, mod->module_init);
72150- free_core:
72151- module_free(mod, mod->module_core);
72152+ module_free_exec(mod, mod->module_init_rx);
72153+ free_core_rx:
72154+ module_free_exec(mod, mod->module_core_rx);
72155+ free_init_rw:
72156+ module_free(mod, mod->module_init_rw);
72157+ free_core_rw:
72158+ module_free(mod, mod->module_core_rw);
72159 /* mod will be freed with core. Don't access it beyond this line! */
72160 free_percpu:
72161 if (percpu)
72162@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
72163 mod->symtab = mod->core_symtab;
72164 mod->strtab = mod->core_strtab;
72165 #endif
72166- module_free(mod, mod->module_init);
72167- mod->module_init = NULL;
72168- mod->init_size = 0;
72169- mod->init_text_size = 0;
72170+ module_free(mod, mod->module_init_rw);
72171+ module_free_exec(mod, mod->module_init_rx);
72172+ mod->module_init_rw = NULL;
72173+ mod->module_init_rx = NULL;
72174+ mod->init_size_rw = 0;
72175+ mod->init_size_rx = 0;
72176 mutex_unlock(&module_mutex);
72177
72178 return 0;
72179@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
72180 unsigned long nextval;
72181
72182 /* At worse, next value is at end of module */
72183- if (within_module_init(addr, mod))
72184- nextval = (unsigned long)mod->module_init+mod->init_text_size;
72185+ if (within_module_init_rx(addr, mod))
72186+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
72187+ else if (within_module_init_rw(addr, mod))
72188+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
72189+ else if (within_module_core_rx(addr, mod))
72190+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
72191+ else if (within_module_core_rw(addr, mod))
72192+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
72193 else
72194- nextval = (unsigned long)mod->module_core+mod->core_text_size;
72195+ return NULL;
72196
72197 /* Scan for closest preceeding symbol, and next symbol. (ELF
72198 starts real symbols at 1). */
72199@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
72200 char buf[8];
72201
72202 seq_printf(m, "%s %u",
72203- mod->name, mod->init_size + mod->core_size);
72204+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
72205 print_unload_info(m, mod);
72206
72207 /* Informative for users. */
72208@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
72209 mod->state == MODULE_STATE_COMING ? "Loading":
72210 "Live");
72211 /* Used by oprofile and other similar tools. */
72212- seq_printf(m, " 0x%p", mod->module_core);
72213+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
72214
72215 /* Taints info */
72216 if (mod->taints)
72217@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
72218
72219 static int __init proc_modules_init(void)
72220 {
72221+#ifndef CONFIG_GRKERNSEC_HIDESYM
72222+#ifdef CONFIG_GRKERNSEC_PROC_USER
72223+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72224+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72225+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
72226+#else
72227 proc_create("modules", 0, NULL, &proc_modules_operations);
72228+#endif
72229+#else
72230+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72231+#endif
72232 return 0;
72233 }
72234 module_init(proc_modules_init);
72235@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
72236 {
72237 struct module *mod;
72238
72239- if (addr < module_addr_min || addr > module_addr_max)
72240+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
72241+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
72242 return NULL;
72243
72244 list_for_each_entry_rcu(mod, &modules, list)
72245- if (within_module_core(addr, mod)
72246- || within_module_init(addr, mod))
72247+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
72248 return mod;
72249 return NULL;
72250 }
72251@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
72252 */
72253 struct module *__module_text_address(unsigned long addr)
72254 {
72255- struct module *mod = __module_address(addr);
72256+ struct module *mod;
72257+
72258+#ifdef CONFIG_X86_32
72259+ addr = ktla_ktva(addr);
72260+#endif
72261+
72262+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
72263+ return NULL;
72264+
72265+ mod = __module_address(addr);
72266+
72267 if (mod) {
72268 /* Make sure it's within the text section. */
72269- if (!within(addr, mod->module_init, mod->init_text_size)
72270- && !within(addr, mod->module_core, mod->core_text_size))
72271+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
72272 mod = NULL;
72273 }
72274 return mod;
72275diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
72276index ec815a9..fe46e99 100644
72277--- a/kernel/mutex-debug.c
72278+++ b/kernel/mutex-debug.c
72279@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
72280 }
72281
72282 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72283- struct thread_info *ti)
72284+ struct task_struct *task)
72285 {
72286 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
72287
72288 /* Mark the current thread as blocked on the lock: */
72289- ti->task->blocked_on = waiter;
72290+ task->blocked_on = waiter;
72291 }
72292
72293 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72294- struct thread_info *ti)
72295+ struct task_struct *task)
72296 {
72297 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
72298- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
72299- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
72300- ti->task->blocked_on = NULL;
72301+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
72302+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
72303+ task->blocked_on = NULL;
72304
72305 list_del_init(&waiter->list);
72306 waiter->task = NULL;
72307@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
72308 return;
72309
72310 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
72311- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
72312+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
72313 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
72314 mutex_clear_owner(lock);
72315 }
72316diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
72317index 6b2d735..372d3c4 100644
72318--- a/kernel/mutex-debug.h
72319+++ b/kernel/mutex-debug.h
72320@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
72321 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
72322 extern void debug_mutex_add_waiter(struct mutex *lock,
72323 struct mutex_waiter *waiter,
72324- struct thread_info *ti);
72325+ struct task_struct *task);
72326 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72327- struct thread_info *ti);
72328+ struct task_struct *task);
72329 extern void debug_mutex_unlock(struct mutex *lock);
72330 extern void debug_mutex_init(struct mutex *lock, const char *name,
72331 struct lock_class_key *key);
72332
72333 static inline void mutex_set_owner(struct mutex *lock)
72334 {
72335- lock->owner = current_thread_info();
72336+ lock->owner = current;
72337 }
72338
72339 static inline void mutex_clear_owner(struct mutex *lock)
72340diff --git a/kernel/mutex.c b/kernel/mutex.c
72341index f85644c..5ee9f77 100644
72342--- a/kernel/mutex.c
72343+++ b/kernel/mutex.c
72344@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72345 */
72346
72347 for (;;) {
72348- struct thread_info *owner;
72349+ struct task_struct *owner;
72350
72351 /*
72352 * If we own the BKL, then don't spin. The owner of
72353@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72354 spin_lock_mutex(&lock->wait_lock, flags);
72355
72356 debug_mutex_lock_common(lock, &waiter);
72357- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
72358+ debug_mutex_add_waiter(lock, &waiter, task);
72359
72360 /* add waiting tasks to the end of the waitqueue (FIFO): */
72361 list_add_tail(&waiter.list, &lock->wait_list);
72362@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72363 * TASK_UNINTERRUPTIBLE case.)
72364 */
72365 if (unlikely(signal_pending_state(state, task))) {
72366- mutex_remove_waiter(lock, &waiter,
72367- task_thread_info(task));
72368+ mutex_remove_waiter(lock, &waiter, task);
72369 mutex_release(&lock->dep_map, 1, ip);
72370 spin_unlock_mutex(&lock->wait_lock, flags);
72371
72372@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72373 done:
72374 lock_acquired(&lock->dep_map, ip);
72375 /* got the lock - rejoice! */
72376- mutex_remove_waiter(lock, &waiter, current_thread_info());
72377+ mutex_remove_waiter(lock, &waiter, task);
72378 mutex_set_owner(lock);
72379
72380 /* set it to 0 if there are no waiters left: */
72381diff --git a/kernel/mutex.h b/kernel/mutex.h
72382index 67578ca..4115fbf 100644
72383--- a/kernel/mutex.h
72384+++ b/kernel/mutex.h
72385@@ -19,7 +19,7 @@
72386 #ifdef CONFIG_SMP
72387 static inline void mutex_set_owner(struct mutex *lock)
72388 {
72389- lock->owner = current_thread_info();
72390+ lock->owner = current;
72391 }
72392
72393 static inline void mutex_clear_owner(struct mutex *lock)
72394diff --git a/kernel/panic.c b/kernel/panic.c
72395index 96b45d0..45c447a 100644
72396--- a/kernel/panic.c
72397+++ b/kernel/panic.c
72398@@ -352,7 +352,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
72399 const char *board;
72400
72401 printk(KERN_WARNING "------------[ cut here ]------------\n");
72402- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
72403+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
72404 board = dmi_get_system_info(DMI_PRODUCT_NAME);
72405 if (board)
72406 printk(KERN_WARNING "Hardware name: %s\n", board);
72407@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
72408 */
72409 void __stack_chk_fail(void)
72410 {
72411- panic("stack-protector: Kernel stack is corrupted in: %p\n",
72412+ dump_stack();
72413+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
72414 __builtin_return_address(0));
72415 }
72416 EXPORT_SYMBOL(__stack_chk_fail);
72417diff --git a/kernel/params.c b/kernel/params.c
72418index d656c27..21e452c 100644
72419--- a/kernel/params.c
72420+++ b/kernel/params.c
72421@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
72422 return ret;
72423 }
72424
72425-static struct sysfs_ops module_sysfs_ops = {
72426+static const struct sysfs_ops module_sysfs_ops = {
72427 .show = module_attr_show,
72428 .store = module_attr_store,
72429 };
72430@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
72431 return 0;
72432 }
72433
72434-static struct kset_uevent_ops module_uevent_ops = {
72435+static const struct kset_uevent_ops module_uevent_ops = {
72436 .filter = uevent_filter,
72437 };
72438
72439diff --git a/kernel/perf_event.c b/kernel/perf_event.c
72440index 37ebc14..9c121d9 100644
72441--- a/kernel/perf_event.c
72442+++ b/kernel/perf_event.c
72443@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
72444 */
72445 int sysctl_perf_event_sample_rate __read_mostly = 100000;
72446
72447-static atomic64_t perf_event_id;
72448+static atomic64_unchecked_t perf_event_id;
72449
72450 /*
72451 * Lock for (sysadmin-configurable) event reservations:
72452@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
72453 * In order to keep per-task stats reliable we need to flip the event
72454 * values when we flip the contexts.
72455 */
72456- value = atomic64_read(&next_event->count);
72457- value = atomic64_xchg(&event->count, value);
72458- atomic64_set(&next_event->count, value);
72459+ value = atomic64_read_unchecked(&next_event->count);
72460+ value = atomic64_xchg_unchecked(&event->count, value);
72461+ atomic64_set_unchecked(&next_event->count, value);
72462
72463 swap(event->total_time_enabled, next_event->total_time_enabled);
72464 swap(event->total_time_running, next_event->total_time_running);
72465@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
72466 update_event_times(event);
72467 }
72468
72469- return atomic64_read(&event->count);
72470+ return atomic64_read_unchecked(&event->count);
72471 }
72472
72473 /*
72474@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
72475 values[n++] = 1 + leader->nr_siblings;
72476 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72477 values[n++] = leader->total_time_enabled +
72478- atomic64_read(&leader->child_total_time_enabled);
72479+ atomic64_read_unchecked(&leader->child_total_time_enabled);
72480 }
72481 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72482 values[n++] = leader->total_time_running +
72483- atomic64_read(&leader->child_total_time_running);
72484+ atomic64_read_unchecked(&leader->child_total_time_running);
72485 }
72486
72487 size = n * sizeof(u64);
72488@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
72489 values[n++] = perf_event_read_value(event);
72490 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72491 values[n++] = event->total_time_enabled +
72492- atomic64_read(&event->child_total_time_enabled);
72493+ atomic64_read_unchecked(&event->child_total_time_enabled);
72494 }
72495 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72496 values[n++] = event->total_time_running +
72497- atomic64_read(&event->child_total_time_running);
72498+ atomic64_read_unchecked(&event->child_total_time_running);
72499 }
72500 if (read_format & PERF_FORMAT_ID)
72501 values[n++] = primary_event_id(event);
72502@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
72503 static void perf_event_reset(struct perf_event *event)
72504 {
72505 (void)perf_event_read(event);
72506- atomic64_set(&event->count, 0);
72507+ atomic64_set_unchecked(&event->count, 0);
72508 perf_event_update_userpage(event);
72509 }
72510
72511@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
72512 ++userpg->lock;
72513 barrier();
72514 userpg->index = perf_event_index(event);
72515- userpg->offset = atomic64_read(&event->count);
72516+ userpg->offset = atomic64_read_unchecked(&event->count);
72517 if (event->state == PERF_EVENT_STATE_ACTIVE)
72518- userpg->offset -= atomic64_read(&event->hw.prev_count);
72519+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
72520
72521 userpg->time_enabled = event->total_time_enabled +
72522- atomic64_read(&event->child_total_time_enabled);
72523+ atomic64_read_unchecked(&event->child_total_time_enabled);
72524
72525 userpg->time_running = event->total_time_running +
72526- atomic64_read(&event->child_total_time_running);
72527+ atomic64_read_unchecked(&event->child_total_time_running);
72528
72529 barrier();
72530 ++userpg->lock;
72531@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
72532 u64 values[4];
72533 int n = 0;
72534
72535- values[n++] = atomic64_read(&event->count);
72536+ values[n++] = atomic64_read_unchecked(&event->count);
72537 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72538 values[n++] = event->total_time_enabled +
72539- atomic64_read(&event->child_total_time_enabled);
72540+ atomic64_read_unchecked(&event->child_total_time_enabled);
72541 }
72542 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72543 values[n++] = event->total_time_running +
72544- atomic64_read(&event->child_total_time_running);
72545+ atomic64_read_unchecked(&event->child_total_time_running);
72546 }
72547 if (read_format & PERF_FORMAT_ID)
72548 values[n++] = primary_event_id(event);
72549@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
72550 if (leader != event)
72551 leader->pmu->read(leader);
72552
72553- values[n++] = atomic64_read(&leader->count);
72554+ values[n++] = atomic64_read_unchecked(&leader->count);
72555 if (read_format & PERF_FORMAT_ID)
72556 values[n++] = primary_event_id(leader);
72557
72558@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
72559 if (sub != event)
72560 sub->pmu->read(sub);
72561
72562- values[n++] = atomic64_read(&sub->count);
72563+ values[n++] = atomic64_read_unchecked(&sub->count);
72564 if (read_format & PERF_FORMAT_ID)
72565 values[n++] = primary_event_id(sub);
72566
72567@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
72568 * need to add enough zero bytes after the string to handle
72569 * the 64bit alignment we do later.
72570 */
72571- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
72572+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
72573 if (!buf) {
72574 name = strncpy(tmp, "//enomem", sizeof(tmp));
72575 goto got_name;
72576 }
72577- name = d_path(&file->f_path, buf, PATH_MAX);
72578+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
72579 if (IS_ERR(name)) {
72580 name = strncpy(tmp, "//toolong", sizeof(tmp));
72581 goto got_name;
72582@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
72583 {
72584 struct hw_perf_event *hwc = &event->hw;
72585
72586- atomic64_add(nr, &event->count);
72587+ atomic64_add_unchecked(nr, &event->count);
72588
72589 if (!hwc->sample_period)
72590 return;
72591@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
72592 u64 now;
72593
72594 now = cpu_clock(cpu);
72595- prev = atomic64_read(&event->hw.prev_count);
72596- atomic64_set(&event->hw.prev_count, now);
72597- atomic64_add(now - prev, &event->count);
72598+ prev = atomic64_read_unchecked(&event->hw.prev_count);
72599+ atomic64_set_unchecked(&event->hw.prev_count, now);
72600+ atomic64_add_unchecked(now - prev, &event->count);
72601 }
72602
72603 static int cpu_clock_perf_event_enable(struct perf_event *event)
72604@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
72605 struct hw_perf_event *hwc = &event->hw;
72606 int cpu = raw_smp_processor_id();
72607
72608- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
72609+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
72610 perf_swevent_start_hrtimer(event);
72611
72612 return 0;
72613@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
72614 u64 prev;
72615 s64 delta;
72616
72617- prev = atomic64_xchg(&event->hw.prev_count, now);
72618+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
72619 delta = now - prev;
72620- atomic64_add(delta, &event->count);
72621+ atomic64_add_unchecked(delta, &event->count);
72622 }
72623
72624 static int task_clock_perf_event_enable(struct perf_event *event)
72625@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
72626
72627 now = event->ctx->time;
72628
72629- atomic64_set(&hwc->prev_count, now);
72630+ atomic64_set_unchecked(&hwc->prev_count, now);
72631
72632 perf_swevent_start_hrtimer(event);
72633
72634@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
72635 event->parent = parent_event;
72636
72637 event->ns = get_pid_ns(current->nsproxy->pid_ns);
72638- event->id = atomic64_inc_return(&perf_event_id);
72639+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
72640
72641 event->state = PERF_EVENT_STATE_INACTIVE;
72642
72643@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
72644 if (child_event->attr.inherit_stat)
72645 perf_event_read_event(child_event, child);
72646
72647- child_val = atomic64_read(&child_event->count);
72648+ child_val = atomic64_read_unchecked(&child_event->count);
72649
72650 /*
72651 * Add back the child's count to the parent's count:
72652 */
72653- atomic64_add(child_val, &parent_event->count);
72654- atomic64_add(child_event->total_time_enabled,
72655+ atomic64_add_unchecked(child_val, &parent_event->count);
72656+ atomic64_add_unchecked(child_event->total_time_enabled,
72657 &parent_event->child_total_time_enabled);
72658- atomic64_add(child_event->total_time_running,
72659+ atomic64_add_unchecked(child_event->total_time_running,
72660 &parent_event->child_total_time_running);
72661
72662 /*
72663diff --git a/kernel/pid.c b/kernel/pid.c
72664index fce7198..4f23a7e 100644
72665--- a/kernel/pid.c
72666+++ b/kernel/pid.c
72667@@ -33,6 +33,7 @@
72668 #include <linux/rculist.h>
72669 #include <linux/bootmem.h>
72670 #include <linux/hash.h>
72671+#include <linux/security.h>
72672 #include <linux/pid_namespace.h>
72673 #include <linux/init_task.h>
72674 #include <linux/syscalls.h>
72675@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
72676
72677 int pid_max = PID_MAX_DEFAULT;
72678
72679-#define RESERVED_PIDS 300
72680+#define RESERVED_PIDS 500
72681
72682 int pid_max_min = RESERVED_PIDS + 1;
72683 int pid_max_max = PID_MAX_LIMIT;
72684@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
72685 */
72686 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
72687 {
72688- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
72689+ struct task_struct *task;
72690+
72691+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
72692+
72693+ if (gr_pid_is_chrooted(task))
72694+ return NULL;
72695+
72696+ return task;
72697 }
72698
72699 struct task_struct *find_task_by_vpid(pid_t vnr)
72700@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
72701 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
72702 }
72703
72704+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
72705+{
72706+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
72707+}
72708+
72709 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
72710 {
72711 struct pid *pid;
72712diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
72713index 5c9dc22..d271117 100644
72714--- a/kernel/posix-cpu-timers.c
72715+++ b/kernel/posix-cpu-timers.c
72716@@ -6,6 +6,7 @@
72717 #include <linux/posix-timers.h>
72718 #include <linux/errno.h>
72719 #include <linux/math64.h>
72720+#include <linux/security.h>
72721 #include <asm/uaccess.h>
72722 #include <linux/kernel_stat.h>
72723 #include <trace/events/timer.h>
72724@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
72725
72726 static __init int init_posix_cpu_timers(void)
72727 {
72728- struct k_clock process = {
72729+ static struct k_clock process = {
72730 .clock_getres = process_cpu_clock_getres,
72731 .clock_get = process_cpu_clock_get,
72732 .clock_set = do_posix_clock_nosettime,
72733@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
72734 .nsleep = process_cpu_nsleep,
72735 .nsleep_restart = process_cpu_nsleep_restart,
72736 };
72737- struct k_clock thread = {
72738+ static struct k_clock thread = {
72739 .clock_getres = thread_cpu_clock_getres,
72740 .clock_get = thread_cpu_clock_get,
72741 .clock_set = do_posix_clock_nosettime,
72742diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
72743index 5e76d22..cf1baeb 100644
72744--- a/kernel/posix-timers.c
72745+++ b/kernel/posix-timers.c
72746@@ -42,6 +42,7 @@
72747 #include <linux/compiler.h>
72748 #include <linux/idr.h>
72749 #include <linux/posix-timers.h>
72750+#include <linux/grsecurity.h>
72751 #include <linux/syscalls.h>
72752 #include <linux/wait.h>
72753 #include <linux/workqueue.h>
72754@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
72755 * which we beg off on and pass to do_sys_settimeofday().
72756 */
72757
72758-static struct k_clock posix_clocks[MAX_CLOCKS];
72759+static struct k_clock *posix_clocks[MAX_CLOCKS];
72760
72761 /*
72762 * These ones are defined below.
72763@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
72764 */
72765 #define CLOCK_DISPATCH(clock, call, arglist) \
72766 ((clock) < 0 ? posix_cpu_##call arglist : \
72767- (posix_clocks[clock].call != NULL \
72768- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
72769+ (posix_clocks[clock]->call != NULL \
72770+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
72771
72772 /*
72773 * Default clock hook functions when the struct k_clock passed
72774@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
72775 struct timespec *tp)
72776 {
72777 tp->tv_sec = 0;
72778- tp->tv_nsec = posix_clocks[which_clock].res;
72779+ tp->tv_nsec = posix_clocks[which_clock]->res;
72780 return 0;
72781 }
72782
72783@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
72784 return 0;
72785 if ((unsigned) which_clock >= MAX_CLOCKS)
72786 return 1;
72787- if (posix_clocks[which_clock].clock_getres != NULL)
72788+ if (posix_clocks[which_clock] == NULL)
72789 return 0;
72790- if (posix_clocks[which_clock].res != 0)
72791+ if (posix_clocks[which_clock]->clock_getres != NULL)
72792+ return 0;
72793+ if (posix_clocks[which_clock]->res != 0)
72794 return 0;
72795 return 1;
72796 }
72797@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
72798 */
72799 static __init int init_posix_timers(void)
72800 {
72801- struct k_clock clock_realtime = {
72802+ static struct k_clock clock_realtime = {
72803 .clock_getres = hrtimer_get_res,
72804 };
72805- struct k_clock clock_monotonic = {
72806+ static struct k_clock clock_monotonic = {
72807 .clock_getres = hrtimer_get_res,
72808 .clock_get = posix_ktime_get_ts,
72809 .clock_set = do_posix_clock_nosettime,
72810 };
72811- struct k_clock clock_monotonic_raw = {
72812+ static struct k_clock clock_monotonic_raw = {
72813 .clock_getres = hrtimer_get_res,
72814 .clock_get = posix_get_monotonic_raw,
72815 .clock_set = do_posix_clock_nosettime,
72816 .timer_create = no_timer_create,
72817 .nsleep = no_nsleep,
72818 };
72819- struct k_clock clock_realtime_coarse = {
72820+ static struct k_clock clock_realtime_coarse = {
72821 .clock_getres = posix_get_coarse_res,
72822 .clock_get = posix_get_realtime_coarse,
72823 .clock_set = do_posix_clock_nosettime,
72824 .timer_create = no_timer_create,
72825 .nsleep = no_nsleep,
72826 };
72827- struct k_clock clock_monotonic_coarse = {
72828+ static struct k_clock clock_monotonic_coarse = {
72829 .clock_getres = posix_get_coarse_res,
72830 .clock_get = posix_get_monotonic_coarse,
72831 .clock_set = do_posix_clock_nosettime,
72832@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
72833 .nsleep = no_nsleep,
72834 };
72835
72836+ pax_track_stack();
72837+
72838 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
72839 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
72840 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
72841@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
72842 return;
72843 }
72844
72845- posix_clocks[clock_id] = *new_clock;
72846+ posix_clocks[clock_id] = new_clock;
72847 }
72848 EXPORT_SYMBOL_GPL(register_posix_clock);
72849
72850@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
72851 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
72852 return -EFAULT;
72853
72854+ /* only the CLOCK_REALTIME clock can be set, all other clocks
72855+ have their clock_set fptr set to a nosettime dummy function
72856+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
72857+ call common_clock_set, which calls do_sys_settimeofday, which
72858+ we hook
72859+ */
72860+
72861 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
72862 }
72863
72864diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
72865index 04a9e90..bc355aa 100644
72866--- a/kernel/power/hibernate.c
72867+++ b/kernel/power/hibernate.c
72868@@ -48,14 +48,14 @@ enum {
72869
72870 static int hibernation_mode = HIBERNATION_SHUTDOWN;
72871
72872-static struct platform_hibernation_ops *hibernation_ops;
72873+static const struct platform_hibernation_ops *hibernation_ops;
72874
72875 /**
72876 * hibernation_set_ops - set the global hibernate operations
72877 * @ops: the hibernation operations to use in subsequent hibernation transitions
72878 */
72879
72880-void hibernation_set_ops(struct platform_hibernation_ops *ops)
72881+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
72882 {
72883 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
72884 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
72885diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
72886index e8b3370..484c2e4 100644
72887--- a/kernel/power/poweroff.c
72888+++ b/kernel/power/poweroff.c
72889@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
72890 .enable_mask = SYSRQ_ENABLE_BOOT,
72891 };
72892
72893-static int pm_sysrq_init(void)
72894+static int __init pm_sysrq_init(void)
72895 {
72896 register_sysrq_key('o', &sysrq_poweroff_op);
72897 return 0;
72898diff --git a/kernel/power/process.c b/kernel/power/process.c
72899index e7cd671..56d5f459 100644
72900--- a/kernel/power/process.c
72901+++ b/kernel/power/process.c
72902@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
72903 struct timeval start, end;
72904 u64 elapsed_csecs64;
72905 unsigned int elapsed_csecs;
72906+ bool timedout = false;
72907
72908 do_gettimeofday(&start);
72909
72910 end_time = jiffies + TIMEOUT;
72911 do {
72912 todo = 0;
72913+ if (time_after(jiffies, end_time))
72914+ timedout = true;
72915 read_lock(&tasklist_lock);
72916 do_each_thread(g, p) {
72917 if (frozen(p) || !freezeable(p))
72918@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
72919 * It is "frozen enough". If the task does wake
72920 * up, it will immediately call try_to_freeze.
72921 */
72922- if (!task_is_stopped_or_traced(p) &&
72923- !freezer_should_skip(p))
72924+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
72925 todo++;
72926+ if (timedout) {
72927+ printk(KERN_ERR "Task refusing to freeze:\n");
72928+ sched_show_task(p);
72929+ }
72930+ }
72931 } while_each_thread(g, p);
72932 read_unlock(&tasklist_lock);
72933 yield(); /* Yield is okay here */
72934- if (time_after(jiffies, end_time))
72935- break;
72936- } while (todo);
72937+ } while (todo && !timedout);
72938
72939 do_gettimeofday(&end);
72940 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
72941diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
72942index 40dd021..fb30ceb 100644
72943--- a/kernel/power/suspend.c
72944+++ b/kernel/power/suspend.c
72945@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
72946 [PM_SUSPEND_MEM] = "mem",
72947 };
72948
72949-static struct platform_suspend_ops *suspend_ops;
72950+static const struct platform_suspend_ops *suspend_ops;
72951
72952 /**
72953 * suspend_set_ops - Set the global suspend method table.
72954 * @ops: Pointer to ops structure.
72955 */
72956-void suspend_set_ops(struct platform_suspend_ops *ops)
72957+void suspend_set_ops(const struct platform_suspend_ops *ops)
72958 {
72959 mutex_lock(&pm_mutex);
72960 suspend_ops = ops;
72961diff --git a/kernel/printk.c b/kernel/printk.c
72962index 4cade47..637e78a 100644
72963--- a/kernel/printk.c
72964+++ b/kernel/printk.c
72965@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf, int len)
72966 char c;
72967 int error = 0;
72968
72969+#ifdef CONFIG_GRKERNSEC_DMESG
72970+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
72971+ return -EPERM;
72972+#endif
72973+
72974 error = security_syslog(type);
72975 if (error)
72976 return error;
72977diff --git a/kernel/profile.c b/kernel/profile.c
72978index dfadc5b..7f59404 100644
72979--- a/kernel/profile.c
72980+++ b/kernel/profile.c
72981@@ -39,7 +39,7 @@ struct profile_hit {
72982 /* Oprofile timer tick hook */
72983 static int (*timer_hook)(struct pt_regs *) __read_mostly;
72984
72985-static atomic_t *prof_buffer;
72986+static atomic_unchecked_t *prof_buffer;
72987 static unsigned long prof_len, prof_shift;
72988
72989 int prof_on __read_mostly;
72990@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
72991 hits[i].pc = 0;
72992 continue;
72993 }
72994- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
72995+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
72996 hits[i].hits = hits[i].pc = 0;
72997 }
72998 }
72999@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
73000 * Add the current hit(s) and flush the write-queue out
73001 * to the global buffer:
73002 */
73003- atomic_add(nr_hits, &prof_buffer[pc]);
73004+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
73005 for (i = 0; i < NR_PROFILE_HIT; ++i) {
73006- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
73007+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
73008 hits[i].pc = hits[i].hits = 0;
73009 }
73010 out:
73011@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
73012 if (prof_on != type || !prof_buffer)
73013 return;
73014 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
73015- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
73016+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
73017 }
73018 #endif /* !CONFIG_SMP */
73019 EXPORT_SYMBOL_GPL(profile_hits);
73020@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
73021 return -EFAULT;
73022 buf++; p++; count--; read++;
73023 }
73024- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
73025+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
73026 if (copy_to_user(buf, (void *)pnt, count))
73027 return -EFAULT;
73028 read += count;
73029@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
73030 }
73031 #endif
73032 profile_discard_flip_buffers();
73033- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
73034+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
73035 return count;
73036 }
73037
73038diff --git a/kernel/ptrace.c b/kernel/ptrace.c
73039index 05625f6..733bf70 100644
73040--- a/kernel/ptrace.c
73041+++ b/kernel/ptrace.c
73042@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
73043 return ret;
73044 }
73045
73046-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73047+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
73048+ unsigned int log)
73049 {
73050 const struct cred *cred = current_cred(), *tcred;
73051
73052@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73053 cred->gid != tcred->egid ||
73054 cred->gid != tcred->sgid ||
73055 cred->gid != tcred->gid) &&
73056- !capable(CAP_SYS_PTRACE)) {
73057+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73058+ (log && !capable(CAP_SYS_PTRACE)))
73059+ ) {
73060 rcu_read_unlock();
73061 return -EPERM;
73062 }
73063@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73064 smp_rmb();
73065 if (task->mm)
73066 dumpable = get_dumpable(task->mm);
73067- if (!dumpable && !capable(CAP_SYS_PTRACE))
73068+ if (!dumpable &&
73069+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73070+ (log && !capable(CAP_SYS_PTRACE))))
73071 return -EPERM;
73072
73073 return security_ptrace_access_check(task, mode);
73074@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
73075 {
73076 int err;
73077 task_lock(task);
73078- err = __ptrace_may_access(task, mode);
73079+ err = __ptrace_may_access(task, mode, 0);
73080+ task_unlock(task);
73081+ return !err;
73082+}
73083+
73084+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
73085+{
73086+ int err;
73087+ task_lock(task);
73088+ err = __ptrace_may_access(task, mode, 1);
73089 task_unlock(task);
73090 return !err;
73091 }
73092@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
73093 goto out;
73094
73095 task_lock(task);
73096- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
73097+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
73098 task_unlock(task);
73099 if (retval)
73100 goto unlock_creds;
73101@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
73102 goto unlock_tasklist;
73103
73104 task->ptrace = PT_PTRACED;
73105- if (capable(CAP_SYS_PTRACE))
73106+ if (capable_nolog(CAP_SYS_PTRACE))
73107 task->ptrace |= PT_PTRACE_CAP;
73108
73109 __ptrace_link(task, current);
73110@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
73111 {
73112 int copied = 0;
73113
73114+ pax_track_stack();
73115+
73116 while (len > 0) {
73117 char buf[128];
73118 int this_len, retval;
73119@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
73120 {
73121 int copied = 0;
73122
73123+ pax_track_stack();
73124+
73125 while (len > 0) {
73126 char buf[128];
73127 int this_len, retval;
73128@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
73129 int ret = -EIO;
73130 siginfo_t siginfo;
73131
73132+ pax_track_stack();
73133+
73134 switch (request) {
73135 case PTRACE_PEEKTEXT:
73136 case PTRACE_PEEKDATA:
73137@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
73138 ret = ptrace_setoptions(child, data);
73139 break;
73140 case PTRACE_GETEVENTMSG:
73141- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
73142+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
73143 break;
73144
73145 case PTRACE_GETSIGINFO:
73146 ret = ptrace_getsiginfo(child, &siginfo);
73147 if (!ret)
73148- ret = copy_siginfo_to_user((siginfo_t __user *) data,
73149+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
73150 &siginfo);
73151 break;
73152
73153 case PTRACE_SETSIGINFO:
73154- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
73155+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
73156 sizeof siginfo))
73157 ret = -EFAULT;
73158 else
73159@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
73160 goto out;
73161 }
73162
73163+ if (gr_handle_ptrace(child, request)) {
73164+ ret = -EPERM;
73165+ goto out_put_task_struct;
73166+ }
73167+
73168 if (request == PTRACE_ATTACH) {
73169 ret = ptrace_attach(child);
73170 /*
73171 * Some architectures need to do book-keeping after
73172 * a ptrace attach.
73173 */
73174- if (!ret)
73175+ if (!ret) {
73176 arch_ptrace_attach(child);
73177+ gr_audit_ptrace(child);
73178+ }
73179 goto out_put_task_struct;
73180 }
73181
73182@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
73183 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
73184 if (copied != sizeof(tmp))
73185 return -EIO;
73186- return put_user(tmp, (unsigned long __user *)data);
73187+ return put_user(tmp, (__force unsigned long __user *)data);
73188 }
73189
73190 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
73191@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
73192 siginfo_t siginfo;
73193 int ret;
73194
73195+ pax_track_stack();
73196+
73197 switch (request) {
73198 case PTRACE_PEEKTEXT:
73199 case PTRACE_PEEKDATA:
73200@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
73201 goto out;
73202 }
73203
73204+ if (gr_handle_ptrace(child, request)) {
73205+ ret = -EPERM;
73206+ goto out_put_task_struct;
73207+ }
73208+
73209 if (request == PTRACE_ATTACH) {
73210 ret = ptrace_attach(child);
73211 /*
73212 * Some architectures need to do book-keeping after
73213 * a ptrace attach.
73214 */
73215- if (!ret)
73216+ if (!ret) {
73217 arch_ptrace_attach(child);
73218+ gr_audit_ptrace(child);
73219+ }
73220 goto out_put_task_struct;
73221 }
73222
73223diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
73224index 697c0a0..2402696 100644
73225--- a/kernel/rcutorture.c
73226+++ b/kernel/rcutorture.c
73227@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
73228 { 0 };
73229 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
73230 { 0 };
73231-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
73232-static atomic_t n_rcu_torture_alloc;
73233-static atomic_t n_rcu_torture_alloc_fail;
73234-static atomic_t n_rcu_torture_free;
73235-static atomic_t n_rcu_torture_mberror;
73236-static atomic_t n_rcu_torture_error;
73237+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
73238+static atomic_unchecked_t n_rcu_torture_alloc;
73239+static atomic_unchecked_t n_rcu_torture_alloc_fail;
73240+static atomic_unchecked_t n_rcu_torture_free;
73241+static atomic_unchecked_t n_rcu_torture_mberror;
73242+static atomic_unchecked_t n_rcu_torture_error;
73243 static long n_rcu_torture_timers;
73244 static struct list_head rcu_torture_removed;
73245 static cpumask_var_t shuffle_tmp_mask;
73246@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
73247
73248 spin_lock_bh(&rcu_torture_lock);
73249 if (list_empty(&rcu_torture_freelist)) {
73250- atomic_inc(&n_rcu_torture_alloc_fail);
73251+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
73252 spin_unlock_bh(&rcu_torture_lock);
73253 return NULL;
73254 }
73255- atomic_inc(&n_rcu_torture_alloc);
73256+ atomic_inc_unchecked(&n_rcu_torture_alloc);
73257 p = rcu_torture_freelist.next;
73258 list_del_init(p);
73259 spin_unlock_bh(&rcu_torture_lock);
73260@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
73261 static void
73262 rcu_torture_free(struct rcu_torture *p)
73263 {
73264- atomic_inc(&n_rcu_torture_free);
73265+ atomic_inc_unchecked(&n_rcu_torture_free);
73266 spin_lock_bh(&rcu_torture_lock);
73267 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
73268 spin_unlock_bh(&rcu_torture_lock);
73269@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
73270 i = rp->rtort_pipe_count;
73271 if (i > RCU_TORTURE_PIPE_LEN)
73272 i = RCU_TORTURE_PIPE_LEN;
73273- atomic_inc(&rcu_torture_wcount[i]);
73274+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73275 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
73276 rp->rtort_mbtest = 0;
73277 rcu_torture_free(rp);
73278@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
73279 i = rp->rtort_pipe_count;
73280 if (i > RCU_TORTURE_PIPE_LEN)
73281 i = RCU_TORTURE_PIPE_LEN;
73282- atomic_inc(&rcu_torture_wcount[i]);
73283+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73284 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
73285 rp->rtort_mbtest = 0;
73286 list_del(&rp->rtort_free);
73287@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
73288 i = old_rp->rtort_pipe_count;
73289 if (i > RCU_TORTURE_PIPE_LEN)
73290 i = RCU_TORTURE_PIPE_LEN;
73291- atomic_inc(&rcu_torture_wcount[i]);
73292+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
73293 old_rp->rtort_pipe_count++;
73294 cur_ops->deferred_free(old_rp);
73295 }
73296@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
73297 return;
73298 }
73299 if (p->rtort_mbtest == 0)
73300- atomic_inc(&n_rcu_torture_mberror);
73301+ atomic_inc_unchecked(&n_rcu_torture_mberror);
73302 spin_lock(&rand_lock);
73303 cur_ops->read_delay(&rand);
73304 n_rcu_torture_timers++;
73305@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
73306 continue;
73307 }
73308 if (p->rtort_mbtest == 0)
73309- atomic_inc(&n_rcu_torture_mberror);
73310+ atomic_inc_unchecked(&n_rcu_torture_mberror);
73311 cur_ops->read_delay(&rand);
73312 preempt_disable();
73313 pipe_count = p->rtort_pipe_count;
73314@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
73315 rcu_torture_current,
73316 rcu_torture_current_version,
73317 list_empty(&rcu_torture_freelist),
73318- atomic_read(&n_rcu_torture_alloc),
73319- atomic_read(&n_rcu_torture_alloc_fail),
73320- atomic_read(&n_rcu_torture_free),
73321- atomic_read(&n_rcu_torture_mberror),
73322+ atomic_read_unchecked(&n_rcu_torture_alloc),
73323+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
73324+ atomic_read_unchecked(&n_rcu_torture_free),
73325+ atomic_read_unchecked(&n_rcu_torture_mberror),
73326 n_rcu_torture_timers);
73327- if (atomic_read(&n_rcu_torture_mberror) != 0)
73328+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
73329 cnt += sprintf(&page[cnt], " !!!");
73330 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
73331 if (i > 1) {
73332 cnt += sprintf(&page[cnt], "!!! ");
73333- atomic_inc(&n_rcu_torture_error);
73334+ atomic_inc_unchecked(&n_rcu_torture_error);
73335 WARN_ON_ONCE(1);
73336 }
73337 cnt += sprintf(&page[cnt], "Reader Pipe: ");
73338@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
73339 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
73340 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
73341 cnt += sprintf(&page[cnt], " %d",
73342- atomic_read(&rcu_torture_wcount[i]));
73343+ atomic_read_unchecked(&rcu_torture_wcount[i]));
73344 }
73345 cnt += sprintf(&page[cnt], "\n");
73346 if (cur_ops->stats)
73347@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
73348
73349 if (cur_ops->cleanup)
73350 cur_ops->cleanup();
73351- if (atomic_read(&n_rcu_torture_error))
73352+ if (atomic_read_unchecked(&n_rcu_torture_error))
73353 rcu_torture_print_module_parms("End of test: FAILURE");
73354 else
73355 rcu_torture_print_module_parms("End of test: SUCCESS");
73356@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
73357
73358 rcu_torture_current = NULL;
73359 rcu_torture_current_version = 0;
73360- atomic_set(&n_rcu_torture_alloc, 0);
73361- atomic_set(&n_rcu_torture_alloc_fail, 0);
73362- atomic_set(&n_rcu_torture_free, 0);
73363- atomic_set(&n_rcu_torture_mberror, 0);
73364- atomic_set(&n_rcu_torture_error, 0);
73365+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
73366+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
73367+ atomic_set_unchecked(&n_rcu_torture_free, 0);
73368+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
73369+ atomic_set_unchecked(&n_rcu_torture_error, 0);
73370 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
73371- atomic_set(&rcu_torture_wcount[i], 0);
73372+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
73373 for_each_possible_cpu(cpu) {
73374 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
73375 per_cpu(rcu_torture_count, cpu)[i] = 0;
73376diff --git a/kernel/rcutree.c b/kernel/rcutree.c
73377index 683c4f3..97f54c6 100644
73378--- a/kernel/rcutree.c
73379+++ b/kernel/rcutree.c
73380@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
73381 /*
73382 * Do softirq processing for the current CPU.
73383 */
73384-static void rcu_process_callbacks(struct softirq_action *unused)
73385+static void rcu_process_callbacks(void)
73386 {
73387 /*
73388 * Memory references from any prior RCU read-side critical sections
73389diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
73390index c03edf7..ac1b341 100644
73391--- a/kernel/rcutree_plugin.h
73392+++ b/kernel/rcutree_plugin.h
73393@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
73394 */
73395 void __rcu_read_lock(void)
73396 {
73397- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
73398+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
73399 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
73400 }
73401 EXPORT_SYMBOL_GPL(__rcu_read_lock);
73402@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
73403 struct task_struct *t = current;
73404
73405 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
73406- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
73407+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
73408 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
73409 rcu_read_unlock_special(t);
73410 }
73411diff --git a/kernel/relay.c b/kernel/relay.c
73412index 760c262..a9fd241 100644
73413--- a/kernel/relay.c
73414+++ b/kernel/relay.c
73415@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct file *in,
73416 unsigned int flags,
73417 int *nonpad_ret)
73418 {
73419- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
73420+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
73421 struct rchan_buf *rbuf = in->private_data;
73422 unsigned int subbuf_size = rbuf->chan->subbuf_size;
73423 uint64_t pos = (uint64_t) *ppos;
73424@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct file *in,
73425 .ops = &relay_pipe_buf_ops,
73426 .spd_release = relay_page_release,
73427 };
73428+ ssize_t ret;
73429+
73430+ pax_track_stack();
73431
73432 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
73433 return 0;
73434diff --git a/kernel/resource.c b/kernel/resource.c
73435index fb11a58..4e61ae1 100644
73436--- a/kernel/resource.c
73437+++ b/kernel/resource.c
73438@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
73439
73440 static int __init ioresources_init(void)
73441 {
73442+#ifdef CONFIG_GRKERNSEC_PROC_ADD
73443+#ifdef CONFIG_GRKERNSEC_PROC_USER
73444+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
73445+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
73446+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73447+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
73448+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
73449+#endif
73450+#else
73451 proc_create("ioports", 0, NULL, &proc_ioports_operations);
73452 proc_create("iomem", 0, NULL, &proc_iomem_operations);
73453+#endif
73454 return 0;
73455 }
73456 __initcall(ioresources_init);
73457diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
73458index a56f629..1fc4989 100644
73459--- a/kernel/rtmutex-tester.c
73460+++ b/kernel/rtmutex-tester.c
73461@@ -21,7 +21,7 @@
73462 #define MAX_RT_TEST_MUTEXES 8
73463
73464 static spinlock_t rttest_lock;
73465-static atomic_t rttest_event;
73466+static atomic_unchecked_t rttest_event;
73467
73468 struct test_thread_data {
73469 int opcode;
73470@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73471
73472 case RTTEST_LOCKCONT:
73473 td->mutexes[td->opdata] = 1;
73474- td->event = atomic_add_return(1, &rttest_event);
73475+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73476 return 0;
73477
73478 case RTTEST_RESET:
73479@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73480 return 0;
73481
73482 case RTTEST_RESETEVENT:
73483- atomic_set(&rttest_event, 0);
73484+ atomic_set_unchecked(&rttest_event, 0);
73485 return 0;
73486
73487 default:
73488@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73489 return ret;
73490
73491 td->mutexes[id] = 1;
73492- td->event = atomic_add_return(1, &rttest_event);
73493+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73494 rt_mutex_lock(&mutexes[id]);
73495- td->event = atomic_add_return(1, &rttest_event);
73496+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73497 td->mutexes[id] = 4;
73498 return 0;
73499
73500@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73501 return ret;
73502
73503 td->mutexes[id] = 1;
73504- td->event = atomic_add_return(1, &rttest_event);
73505+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73506 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
73507- td->event = atomic_add_return(1, &rttest_event);
73508+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73509 td->mutexes[id] = ret ? 0 : 4;
73510 return ret ? -EINTR : 0;
73511
73512@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73513 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
73514 return ret;
73515
73516- td->event = atomic_add_return(1, &rttest_event);
73517+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73518 rt_mutex_unlock(&mutexes[id]);
73519- td->event = atomic_add_return(1, &rttest_event);
73520+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73521 td->mutexes[id] = 0;
73522 return 0;
73523
73524@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73525 break;
73526
73527 td->mutexes[dat] = 2;
73528- td->event = atomic_add_return(1, &rttest_event);
73529+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73530 break;
73531
73532 case RTTEST_LOCKBKL:
73533@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73534 return;
73535
73536 td->mutexes[dat] = 3;
73537- td->event = atomic_add_return(1, &rttest_event);
73538+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73539 break;
73540
73541 case RTTEST_LOCKNOWAIT:
73542@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
73543 return;
73544
73545 td->mutexes[dat] = 1;
73546- td->event = atomic_add_return(1, &rttest_event);
73547+ td->event = atomic_add_return_unchecked(1, &rttest_event);
73548 return;
73549
73550 case RTTEST_LOCKBKL:
73551diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
73552index 29bd4ba..8c5de90 100644
73553--- a/kernel/rtmutex.c
73554+++ b/kernel/rtmutex.c
73555@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
73556 */
73557 spin_lock_irqsave(&pendowner->pi_lock, flags);
73558
73559- WARN_ON(!pendowner->pi_blocked_on);
73560+ BUG_ON(!pendowner->pi_blocked_on);
73561 WARN_ON(pendowner->pi_blocked_on != waiter);
73562 WARN_ON(pendowner->pi_blocked_on->lock != lock);
73563
73564diff --git a/kernel/sched.c b/kernel/sched.c
73565index 0591df8..6e343c3 100644
73566--- a/kernel/sched.c
73567+++ b/kernel/sched.c
73568@@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
73569 {
73570 unsigned long flags;
73571 struct rq *rq;
73572- int cpu = get_cpu();
73573
73574 #ifdef CONFIG_SMP
73575+ int cpu = get_cpu();
73576+
73577 rq = task_rq_lock(p, &flags);
73578 p->state = TASK_WAKING;
73579
73580@@ -5043,7 +5044,7 @@ out:
73581 * In CONFIG_NO_HZ case, the idle load balance owner will do the
73582 * rebalancing for all the cpus for whom scheduler ticks are stopped.
73583 */
73584-static void run_rebalance_domains(struct softirq_action *h)
73585+static void run_rebalance_domains(void)
73586 {
73587 int this_cpu = smp_processor_id();
73588 struct rq *this_rq = cpu_rq(this_cpu);
73589@@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
73590 struct rq *rq;
73591 int cpu;
73592
73593+ pax_track_stack();
73594+
73595 need_resched:
73596 preempt_disable();
73597 cpu = smp_processor_id();
73598@@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
73599 * Look out! "owner" is an entirely speculative pointer
73600 * access and not reliable.
73601 */
73602-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73603+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
73604 {
73605 unsigned int cpu;
73606 struct rq *rq;
73607@@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73608 * DEBUG_PAGEALLOC could have unmapped it if
73609 * the mutex owner just released it and exited.
73610 */
73611- if (probe_kernel_address(&owner->cpu, cpu))
73612+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
73613 return 0;
73614 #else
73615- cpu = owner->cpu;
73616+ cpu = task_thread_info(owner)->cpu;
73617 #endif
73618
73619 /*
73620@@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
73621 /*
73622 * Is that owner really running on that cpu?
73623 */
73624- if (task_thread_info(rq->curr) != owner || need_resched())
73625+ if (rq->curr != owner || need_resched())
73626 return 0;
73627
73628 cpu_relax();
73629@@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p, const int nice)
73630 /* convert nice value [19,-20] to rlimit style value [1,40] */
73631 int nice_rlim = 20 - nice;
73632
73633+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
73634+
73635 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
73636 capable(CAP_SYS_NICE));
73637 }
73638@@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
73639 if (nice > 19)
73640 nice = 19;
73641
73642- if (increment < 0 && !can_nice(current, nice))
73643+ if (increment < 0 && (!can_nice(current, nice) ||
73644+ gr_handle_chroot_nice()))
73645 return -EPERM;
73646
73647 retval = security_task_setnice(current, nice);
73648@@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
73649 long power;
73650 int weight;
73651
73652- WARN_ON(!sd || !sd->groups);
73653+ BUG_ON(!sd || !sd->groups);
73654
73655 if (cpu != group_first_cpu(sd->groups))
73656 return;
73657diff --git a/kernel/signal.c b/kernel/signal.c
73658index 2494827..cda80a0 100644
73659--- a/kernel/signal.c
73660+++ b/kernel/signal.c
73661@@ -41,12 +41,12 @@
73662
73663 static struct kmem_cache *sigqueue_cachep;
73664
73665-static void __user *sig_handler(struct task_struct *t, int sig)
73666+static __sighandler_t sig_handler(struct task_struct *t, int sig)
73667 {
73668 return t->sighand->action[sig - 1].sa.sa_handler;
73669 }
73670
73671-static int sig_handler_ignored(void __user *handler, int sig)
73672+static int sig_handler_ignored(__sighandler_t handler, int sig)
73673 {
73674 /* Is it explicitly or implicitly ignored? */
73675 return handler == SIG_IGN ||
73676@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
73677 static int sig_task_ignored(struct task_struct *t, int sig,
73678 int from_ancestor_ns)
73679 {
73680- void __user *handler;
73681+ __sighandler_t handler;
73682
73683 handler = sig_handler(t, sig);
73684
73685@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
73686 */
73687 user = get_uid(__task_cred(t)->user);
73688 atomic_inc(&user->sigpending);
73689+
73690+ if (!override_rlimit)
73691+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
73692 if (override_rlimit ||
73693 atomic_read(&user->sigpending) <=
73694 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
73695@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
73696
73697 int unhandled_signal(struct task_struct *tsk, int sig)
73698 {
73699- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
73700+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
73701 if (is_global_init(tsk))
73702 return 1;
73703 if (handler != SIG_IGN && handler != SIG_DFL)
73704@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
73705 }
73706 }
73707
73708+ /* allow glibc communication via tgkill to other threads in our
73709+ thread group */
73710+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
73711+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
73712+ && gr_handle_signal(t, sig))
73713+ return -EPERM;
73714+
73715 return security_task_kill(t, info, sig, 0);
73716 }
73717
73718@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
73719 return send_signal(sig, info, p, 1);
73720 }
73721
73722-static int
73723+int
73724 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73725 {
73726 return send_signal(sig, info, t, 0);
73727@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73728 unsigned long int flags;
73729 int ret, blocked, ignored;
73730 struct k_sigaction *action;
73731+ int is_unhandled = 0;
73732
73733 spin_lock_irqsave(&t->sighand->siglock, flags);
73734 action = &t->sighand->action[sig-1];
73735@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
73736 }
73737 if (action->sa.sa_handler == SIG_DFL)
73738 t->signal->flags &= ~SIGNAL_UNKILLABLE;
73739+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
73740+ is_unhandled = 1;
73741 ret = specific_send_sig_info(sig, info, t);
73742 spin_unlock_irqrestore(&t->sighand->siglock, flags);
73743
73744+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
73745+ normal operation */
73746+ if (is_unhandled) {
73747+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
73748+ gr_handle_crash(t, sig);
73749+ }
73750+
73751 return ret;
73752 }
73753
73754@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
73755 {
73756 int ret = check_kill_permission(sig, info, p);
73757
73758- if (!ret && sig)
73759+ if (!ret && sig) {
73760 ret = do_send_sig_info(sig, info, p, true);
73761+ if (!ret)
73762+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
73763+ }
73764
73765 return ret;
73766 }
73767@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
73768 {
73769 siginfo_t info;
73770
73771+ pax_track_stack();
73772+
73773 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
73774
73775 memset(&info, 0, sizeof info);
73776@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
73777 int error = -ESRCH;
73778
73779 rcu_read_lock();
73780- p = find_task_by_vpid(pid);
73781+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73782+ /* allow glibc communication via tgkill to other threads in our
73783+ thread group */
73784+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
73785+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
73786+ p = find_task_by_vpid_unrestricted(pid);
73787+ else
73788+#endif
73789+ p = find_task_by_vpid(pid);
73790 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
73791 error = check_kill_permission(sig, info, p);
73792 /*
73793diff --git a/kernel/smp.c b/kernel/smp.c
73794index aa9cff3..631a0de 100644
73795--- a/kernel/smp.c
73796+++ b/kernel/smp.c
73797@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
73798 }
73799 EXPORT_SYMBOL(smp_call_function);
73800
73801-void ipi_call_lock(void)
73802+void ipi_call_lock(void) __acquires(call_function.lock)
73803 {
73804 spin_lock(&call_function.lock);
73805 }
73806
73807-void ipi_call_unlock(void)
73808+void ipi_call_unlock(void) __releases(call_function.lock)
73809 {
73810 spin_unlock(&call_function.lock);
73811 }
73812
73813-void ipi_call_lock_irq(void)
73814+void ipi_call_lock_irq(void) __acquires(call_function.lock)
73815 {
73816 spin_lock_irq(&call_function.lock);
73817 }
73818
73819-void ipi_call_unlock_irq(void)
73820+void ipi_call_unlock_irq(void) __releases(call_function.lock)
73821 {
73822 spin_unlock_irq(&call_function.lock);
73823 }
73824diff --git a/kernel/softirq.c b/kernel/softirq.c
73825index 04a0252..580c512 100644
73826--- a/kernel/softirq.c
73827+++ b/kernel/softirq.c
73828@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
73829
73830 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
73831
73832-char *softirq_to_name[NR_SOFTIRQS] = {
73833+const char * const softirq_to_name[NR_SOFTIRQS] = {
73834 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
73835 "TASKLET", "SCHED", "HRTIMER", "RCU"
73836 };
73837@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
73838
73839 asmlinkage void __do_softirq(void)
73840 {
73841- struct softirq_action *h;
73842+ const struct softirq_action *h;
73843 __u32 pending;
73844 int max_restart = MAX_SOFTIRQ_RESTART;
73845 int cpu;
73846@@ -233,7 +233,7 @@ restart:
73847 kstat_incr_softirqs_this_cpu(h - softirq_vec);
73848
73849 trace_softirq_entry(h, softirq_vec);
73850- h->action(h);
73851+ h->action();
73852 trace_softirq_exit(h, softirq_vec);
73853 if (unlikely(prev_count != preempt_count())) {
73854 printk(KERN_ERR "huh, entered softirq %td %s %p"
73855@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
73856 local_irq_restore(flags);
73857 }
73858
73859-void open_softirq(int nr, void (*action)(struct softirq_action *))
73860+void open_softirq(int nr, void (*action)(void))
73861 {
73862- softirq_vec[nr].action = action;
73863+ pax_open_kernel();
73864+ *(void **)&softirq_vec[nr].action = action;
73865+ pax_close_kernel();
73866 }
73867
73868 /*
73869@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
73870
73871 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
73872
73873-static void tasklet_action(struct softirq_action *a)
73874+static void tasklet_action(void)
73875 {
73876 struct tasklet_struct *list;
73877
73878@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
73879 }
73880 }
73881
73882-static void tasklet_hi_action(struct softirq_action *a)
73883+static void tasklet_hi_action(void)
73884 {
73885 struct tasklet_struct *list;
73886
73887diff --git a/kernel/sys.c b/kernel/sys.c
73888index e9512b1..3c265de 100644
73889--- a/kernel/sys.c
73890+++ b/kernel/sys.c
73891@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
73892 error = -EACCES;
73893 goto out;
73894 }
73895+
73896+ if (gr_handle_chroot_setpriority(p, niceval)) {
73897+ error = -EACCES;
73898+ goto out;
73899+ }
73900+
73901 no_nice = security_task_setnice(p, niceval);
73902 if (no_nice) {
73903 error = no_nice;
73904@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
73905 !(user = find_user(who)))
73906 goto out_unlock; /* No processes for this user */
73907
73908- do_each_thread(g, p)
73909+ do_each_thread(g, p) {
73910 if (__task_cred(p)->uid == who)
73911 error = set_one_prio(p, niceval, error);
73912- while_each_thread(g, p);
73913+ } while_each_thread(g, p);
73914 if (who != cred->uid)
73915 free_uid(user); /* For find_user() */
73916 break;
73917@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
73918 !(user = find_user(who)))
73919 goto out_unlock; /* No processes for this user */
73920
73921- do_each_thread(g, p)
73922+ do_each_thread(g, p) {
73923 if (__task_cred(p)->uid == who) {
73924 niceval = 20 - task_nice(p);
73925 if (niceval > retval)
73926 retval = niceval;
73927 }
73928- while_each_thread(g, p);
73929+ } while_each_thread(g, p);
73930 if (who != cred->uid)
73931 free_uid(user); /* for find_user() */
73932 break;
73933@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
73934 goto error;
73935 }
73936
73937+ if (gr_check_group_change(new->gid, new->egid, -1))
73938+ goto error;
73939+
73940 if (rgid != (gid_t) -1 ||
73941 (egid != (gid_t) -1 && egid != old->gid))
73942 new->sgid = new->egid;
73943@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
73944 goto error;
73945
73946 retval = -EPERM;
73947+
73948+ if (gr_check_group_change(gid, gid, gid))
73949+ goto error;
73950+
73951 if (capable(CAP_SETGID))
73952 new->gid = new->egid = new->sgid = new->fsgid = gid;
73953 else if (gid == old->gid || gid == old->sgid)
73954@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
73955 if (!new_user)
73956 return -EAGAIN;
73957
73958+ /*
73959+ * We don't fail in case of NPROC limit excess here because too many
73960+ * poorly written programs don't check set*uid() return code, assuming
73961+ * it never fails if called by root. We may still enforce NPROC limit
73962+ * for programs doing set*uid()+execve() by harmlessly deferring the
73963+ * failure to the execve() stage.
73964+ */
73965 if (atomic_read(&new_user->processes) >=
73966 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
73967- new_user != INIT_USER) {
73968- free_uid(new_user);
73969- return -EAGAIN;
73970- }
73971+ new_user != INIT_USER)
73972+ current->flags |= PF_NPROC_EXCEEDED;
73973+ else
73974+ current->flags &= ~PF_NPROC_EXCEEDED;
73975
73976 free_uid(new->user);
73977 new->user = new_user;
73978@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
73979 goto error;
73980 }
73981
73982+ if (gr_check_user_change(new->uid, new->euid, -1))
73983+ goto error;
73984+
73985 if (new->uid != old->uid) {
73986 retval = set_user(new);
73987 if (retval < 0)
73988@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
73989 goto error;
73990
73991 retval = -EPERM;
73992+
73993+ if (gr_check_crash_uid(uid))
73994+ goto error;
73995+ if (gr_check_user_change(uid, uid, uid))
73996+ goto error;
73997+
73998 if (capable(CAP_SETUID)) {
73999 new->suid = new->uid = uid;
74000 if (uid != old->uid) {
74001@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
74002 goto error;
74003 }
74004
74005+ if (gr_check_user_change(ruid, euid, -1))
74006+ goto error;
74007+
74008 if (ruid != (uid_t) -1) {
74009 new->uid = ruid;
74010 if (ruid != old->uid) {
74011@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
74012 goto error;
74013 }
74014
74015+ if (gr_check_group_change(rgid, egid, -1))
74016+ goto error;
74017+
74018 if (rgid != (gid_t) -1)
74019 new->gid = rgid;
74020 if (egid != (gid_t) -1)
74021@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
74022 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
74023 goto error;
74024
74025+ if (gr_check_user_change(-1, -1, uid))
74026+ goto error;
74027+
74028 if (uid == old->uid || uid == old->euid ||
74029 uid == old->suid || uid == old->fsuid ||
74030 capable(CAP_SETUID)) {
74031@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
74032 if (gid == old->gid || gid == old->egid ||
74033 gid == old->sgid || gid == old->fsgid ||
74034 capable(CAP_SETGID)) {
74035+ if (gr_check_group_change(-1, -1, gid))
74036+ goto error;
74037+
74038 if (gid != old_fsgid) {
74039 new->fsgid = gid;
74040 goto change_okay;
74041@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
74042 error = get_dumpable(me->mm);
74043 break;
74044 case PR_SET_DUMPABLE:
74045- if (arg2 < 0 || arg2 > 1) {
74046+ if (arg2 > 1) {
74047 error = -EINVAL;
74048 break;
74049 }
74050diff --git a/kernel/sysctl.c b/kernel/sysctl.c
74051index b8bd058..ab6a76be 100644
74052--- a/kernel/sysctl.c
74053+++ b/kernel/sysctl.c
74054@@ -63,6 +63,13 @@
74055 static int deprecated_sysctl_warning(struct __sysctl_args *args);
74056
74057 #if defined(CONFIG_SYSCTL)
74058+#include <linux/grsecurity.h>
74059+#include <linux/grinternal.h>
74060+
74061+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
74062+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
74063+ const int op);
74064+extern int gr_handle_chroot_sysctl(const int op);
74065
74066 /* External variables not in a header file. */
74067 extern int C_A_D;
74068@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
74069 static int proc_taint(struct ctl_table *table, int write,
74070 void __user *buffer, size_t *lenp, loff_t *ppos);
74071 #endif
74072+extern ctl_table grsecurity_table[];
74073
74074 static struct ctl_table root_table[];
74075 static struct ctl_table_root sysctl_table_root;
74076@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
74077 int sysctl_legacy_va_layout;
74078 #endif
74079
74080+#ifdef CONFIG_PAX_SOFTMODE
74081+static ctl_table pax_table[] = {
74082+ {
74083+ .ctl_name = CTL_UNNUMBERED,
74084+ .procname = "softmode",
74085+ .data = &pax_softmode,
74086+ .maxlen = sizeof(unsigned int),
74087+ .mode = 0600,
74088+ .proc_handler = &proc_dointvec,
74089+ },
74090+
74091+ { .ctl_name = 0 }
74092+};
74093+#endif
74094+
74095 extern int prove_locking;
74096 extern int lock_stat;
74097
74098@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
74099 #endif
74100
74101 static struct ctl_table kern_table[] = {
74102+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
74103+ {
74104+ .ctl_name = CTL_UNNUMBERED,
74105+ .procname = "grsecurity",
74106+ .mode = 0500,
74107+ .child = grsecurity_table,
74108+ },
74109+#endif
74110+
74111+#ifdef CONFIG_PAX_SOFTMODE
74112+ {
74113+ .ctl_name = CTL_UNNUMBERED,
74114+ .procname = "pax",
74115+ .mode = 0500,
74116+ .child = pax_table,
74117+ },
74118+#endif
74119+
74120 {
74121 .ctl_name = CTL_UNNUMBERED,
74122 .procname = "sched_child_runs_first",
74123@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
74124 .data = &modprobe_path,
74125 .maxlen = KMOD_PATH_LEN,
74126 .mode = 0644,
74127- .proc_handler = &proc_dostring,
74128- .strategy = &sysctl_string,
74129+ .proc_handler = &proc_dostring_modpriv,
74130+ .strategy = &sysctl_string_modpriv,
74131 },
74132 {
74133 .ctl_name = CTL_UNNUMBERED,
74134@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
74135 .mode = 0644,
74136 .proc_handler = &proc_dointvec
74137 },
74138+ {
74139+ .procname = "heap_stack_gap",
74140+ .data = &sysctl_heap_stack_gap,
74141+ .maxlen = sizeof(sysctl_heap_stack_gap),
74142+ .mode = 0644,
74143+ .proc_handler = proc_doulongvec_minmax,
74144+ },
74145 #else
74146 {
74147 .ctl_name = CTL_UNNUMBERED,
74148@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
74149 return 0;
74150 }
74151
74152+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
74153+
74154 static int parse_table(int __user *name, int nlen,
74155 void __user *oldval, size_t __user *oldlenp,
74156 void __user *newval, size_t newlen,
74157@@ -1821,7 +1871,7 @@ repeat:
74158 if (n == table->ctl_name) {
74159 int error;
74160 if (table->child) {
74161- if (sysctl_perm(root, table, MAY_EXEC))
74162+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
74163 return -EPERM;
74164 name++;
74165 nlen--;
74166@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
74167 int error;
74168 int mode;
74169
74170+ if (table->parent != NULL && table->parent->procname != NULL &&
74171+ table->procname != NULL &&
74172+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
74173+ return -EACCES;
74174+ if (gr_handle_chroot_sysctl(op))
74175+ return -EACCES;
74176+ error = gr_handle_sysctl(table, op);
74177+ if (error)
74178+ return error;
74179+
74180+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
74181+ if (error)
74182+ return error;
74183+
74184+ if (root->permissions)
74185+ mode = root->permissions(root, current->nsproxy, table);
74186+ else
74187+ mode = table->mode;
74188+
74189+ return test_perm(mode, op);
74190+}
74191+
74192+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
74193+{
74194+ int error;
74195+ int mode;
74196+
74197 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
74198 if (error)
74199 return error;
74200@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
74201 buffer, lenp, ppos);
74202 }
74203
74204+int proc_dostring_modpriv(struct ctl_table *table, int write,
74205+ void __user *buffer, size_t *lenp, loff_t *ppos)
74206+{
74207+ if (write && !capable(CAP_SYS_MODULE))
74208+ return -EPERM;
74209+
74210+ return _proc_do_string(table->data, table->maxlen, write,
74211+ buffer, lenp, ppos);
74212+}
74213+
74214
74215 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
74216 int *valp,
74217@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
74218 vleft = table->maxlen / sizeof(unsigned long);
74219 left = *lenp;
74220
74221- for (; left && vleft--; i++, min++, max++, first=0) {
74222+ for (; left && vleft--; i++, first=0) {
74223 if (write) {
74224 while (left) {
74225 char c;
74226@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
74227 return -ENOSYS;
74228 }
74229
74230+int proc_dostring_modpriv(struct ctl_table *table, int write,
74231+ void __user *buffer, size_t *lenp, loff_t *ppos)
74232+{
74233+ return -ENOSYS;
74234+}
74235+
74236 int proc_dointvec(struct ctl_table *table, int write,
74237 void __user *buffer, size_t *lenp, loff_t *ppos)
74238 {
74239@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
74240 return 1;
74241 }
74242
74243+int sysctl_string_modpriv(struct ctl_table *table,
74244+ void __user *oldval, size_t __user *oldlenp,
74245+ void __user *newval, size_t newlen)
74246+{
74247+ if (newval && newlen && !capable(CAP_SYS_MODULE))
74248+ return -EPERM;
74249+
74250+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
74251+}
74252+
74253 /*
74254 * This function makes sure that all of the integers in the vector
74255 * are between the minimum and maximum values given in the arrays
74256@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
74257 return -ENOSYS;
74258 }
74259
74260+int sysctl_string_modpriv(struct ctl_table *table,
74261+ void __user *oldval, size_t __user *oldlenp,
74262+ void __user *newval, size_t newlen)
74263+{
74264+ return -ENOSYS;
74265+}
74266+
74267 int sysctl_intvec(struct ctl_table *table,
74268 void __user *oldval, size_t __user *oldlenp,
74269 void __user *newval, size_t newlen)
74270@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
74271 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
74272 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
74273 EXPORT_SYMBOL(proc_dostring);
74274+EXPORT_SYMBOL(proc_dostring_modpriv);
74275 EXPORT_SYMBOL(proc_doulongvec_minmax);
74276 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
74277 EXPORT_SYMBOL(register_sysctl_table);
74278@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
74279 EXPORT_SYMBOL(sysctl_jiffies);
74280 EXPORT_SYMBOL(sysctl_ms_jiffies);
74281 EXPORT_SYMBOL(sysctl_string);
74282+EXPORT_SYMBOL(sysctl_string_modpriv);
74283 EXPORT_SYMBOL(sysctl_data);
74284 EXPORT_SYMBOL(unregister_sysctl_table);
74285diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
74286index 469193c..ea3ecb2 100644
74287--- a/kernel/sysctl_check.c
74288+++ b/kernel/sysctl_check.c
74289@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
74290 } else {
74291 if ((table->strategy == sysctl_data) ||
74292 (table->strategy == sysctl_string) ||
74293+ (table->strategy == sysctl_string_modpriv) ||
74294 (table->strategy == sysctl_intvec) ||
74295 (table->strategy == sysctl_jiffies) ||
74296 (table->strategy == sysctl_ms_jiffies) ||
74297 (table->proc_handler == proc_dostring) ||
74298+ (table->proc_handler == proc_dostring_modpriv) ||
74299 (table->proc_handler == proc_dointvec) ||
74300 (table->proc_handler == proc_dointvec_minmax) ||
74301 (table->proc_handler == proc_dointvec_jiffies) ||
74302diff --git a/kernel/taskstats.c b/kernel/taskstats.c
74303index b080920..d344f89 100644
74304--- a/kernel/taskstats.c
74305+++ b/kernel/taskstats.c
74306@@ -26,9 +26,12 @@
74307 #include <linux/cgroup.h>
74308 #include <linux/fs.h>
74309 #include <linux/file.h>
74310+#include <linux/grsecurity.h>
74311 #include <net/genetlink.h>
74312 #include <asm/atomic.h>
74313
74314+extern int gr_is_taskstats_denied(int pid);
74315+
74316 /*
74317 * Maximum length of a cpumask that can be specified in
74318 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
74319@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
74320 size_t size;
74321 cpumask_var_t mask;
74322
74323+ if (gr_is_taskstats_denied(current->pid))
74324+ return -EACCES;
74325+
74326 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
74327 return -ENOMEM;
74328
74329diff --git a/kernel/time.c b/kernel/time.c
74330index 33df60e..ca768bd 100644
74331--- a/kernel/time.c
74332+++ b/kernel/time.c
74333@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
74334 return error;
74335
74336 if (tz) {
74337+ /* we log in do_settimeofday called below, so don't log twice
74338+ */
74339+ if (!tv)
74340+ gr_log_timechange();
74341+
74342 /* SMP safe, global irq locking makes it work. */
74343 sys_tz = *tz;
74344 update_vsyscall_tz();
74345@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
74346 * Avoid unnecessary multiplications/divisions in the
74347 * two most common HZ cases:
74348 */
74349-unsigned int inline jiffies_to_msecs(const unsigned long j)
74350+inline unsigned int jiffies_to_msecs(const unsigned long j)
74351 {
74352 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
74353 return (MSEC_PER_SEC / HZ) * j;
74354@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
74355 }
74356 EXPORT_SYMBOL(jiffies_to_msecs);
74357
74358-unsigned int inline jiffies_to_usecs(const unsigned long j)
74359+inline unsigned int jiffies_to_usecs(const unsigned long j)
74360 {
74361 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
74362 return (USEC_PER_SEC / HZ) * j;
74363diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
74364index 8917fd3..5f0ead6 100644
74365--- a/kernel/time/tick-broadcast.c
74366+++ b/kernel/time/tick-broadcast.c
74367@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
74368 * then clear the broadcast bit.
74369 */
74370 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
74371- int cpu = smp_processor_id();
74372+ cpu = smp_processor_id();
74373
74374 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
74375 tick_broadcast_clear_oneshot(cpu);
74376diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
74377index 1d1206a..08a7c2f 100644
74378--- a/kernel/time/timekeeping.c
74379+++ b/kernel/time/timekeeping.c
74380@@ -14,6 +14,7 @@
74381 #include <linux/init.h>
74382 #include <linux/mm.h>
74383 #include <linux/sched.h>
74384+#include <linux/grsecurity.h>
74385 #include <linux/sysdev.h>
74386 #include <linux/clocksource.h>
74387 #include <linux/jiffies.h>
74388@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
74389 */
74390 struct timespec ts = xtime;
74391 timespec_add_ns(&ts, nsec);
74392- ACCESS_ONCE(xtime_cache) = ts;
74393+ ACCESS_ONCE_RW(xtime_cache) = ts;
74394 }
74395
74396 /* must hold xtime_lock */
74397@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
74398 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
74399 return -EINVAL;
74400
74401+ gr_log_timechange();
74402+
74403 write_seqlock_irqsave(&xtime_lock, flags);
74404
74405 timekeeping_forward_now();
74406diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
74407index 54c0dda..e9095d9 100644
74408--- a/kernel/time/timer_list.c
74409+++ b/kernel/time/timer_list.c
74410@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
74411
74412 static void print_name_offset(struct seq_file *m, void *sym)
74413 {
74414+#ifdef CONFIG_GRKERNSEC_HIDESYM
74415+ SEQ_printf(m, "<%p>", NULL);
74416+#else
74417 char symname[KSYM_NAME_LEN];
74418
74419 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
74420 SEQ_printf(m, "<%p>", sym);
74421 else
74422 SEQ_printf(m, "%s", symname);
74423+#endif
74424 }
74425
74426 static void
74427@@ -112,7 +116,11 @@ next_one:
74428 static void
74429 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
74430 {
74431+#ifdef CONFIG_GRKERNSEC_HIDESYM
74432+ SEQ_printf(m, " .base: %p\n", NULL);
74433+#else
74434 SEQ_printf(m, " .base: %p\n", base);
74435+#endif
74436 SEQ_printf(m, " .index: %d\n",
74437 base->index);
74438 SEQ_printf(m, " .resolution: %Lu nsecs\n",
74439@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
74440 {
74441 struct proc_dir_entry *pe;
74442
74443+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74444+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
74445+#else
74446 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
74447+#endif
74448 if (!pe)
74449 return -ENOMEM;
74450 return 0;
74451diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
74452index ee5681f..634089b 100644
74453--- a/kernel/time/timer_stats.c
74454+++ b/kernel/time/timer_stats.c
74455@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
74456 static unsigned long nr_entries;
74457 static struct entry entries[MAX_ENTRIES];
74458
74459-static atomic_t overflow_count;
74460+static atomic_unchecked_t overflow_count;
74461
74462 /*
74463 * The entries are in a hash-table, for fast lookup:
74464@@ -140,7 +140,7 @@ static void reset_entries(void)
74465 nr_entries = 0;
74466 memset(entries, 0, sizeof(entries));
74467 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
74468- atomic_set(&overflow_count, 0);
74469+ atomic_set_unchecked(&overflow_count, 0);
74470 }
74471
74472 static struct entry *alloc_entry(void)
74473@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74474 if (likely(entry))
74475 entry->count++;
74476 else
74477- atomic_inc(&overflow_count);
74478+ atomic_inc_unchecked(&overflow_count);
74479
74480 out_unlock:
74481 spin_unlock_irqrestore(lock, flags);
74482@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74483
74484 static void print_name_offset(struct seq_file *m, unsigned long addr)
74485 {
74486+#ifdef CONFIG_GRKERNSEC_HIDESYM
74487+ seq_printf(m, "<%p>", NULL);
74488+#else
74489 char symname[KSYM_NAME_LEN];
74490
74491 if (lookup_symbol_name(addr, symname) < 0)
74492 seq_printf(m, "<%p>", (void *)addr);
74493 else
74494 seq_printf(m, "%s", symname);
74495+#endif
74496 }
74497
74498 static int tstats_show(struct seq_file *m, void *v)
74499@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
74500
74501 seq_puts(m, "Timer Stats Version: v0.2\n");
74502 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
74503- if (atomic_read(&overflow_count))
74504+ if (atomic_read_unchecked(&overflow_count))
74505 seq_printf(m, "Overflow: %d entries\n",
74506- atomic_read(&overflow_count));
74507+ atomic_read_unchecked(&overflow_count));
74508
74509 for (i = 0; i < nr_entries; i++) {
74510 entry = entries + i;
74511@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
74512 {
74513 struct proc_dir_entry *pe;
74514
74515+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74516+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
74517+#else
74518 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
74519+#endif
74520 if (!pe)
74521 return -ENOMEM;
74522 return 0;
74523diff --git a/kernel/timer.c b/kernel/timer.c
74524index cb3c1f1..8bf5526 100644
74525--- a/kernel/timer.c
74526+++ b/kernel/timer.c
74527@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
74528 /*
74529 * This function runs timers and the timer-tq in bottom half context.
74530 */
74531-static void run_timer_softirq(struct softirq_action *h)
74532+static void run_timer_softirq(void)
74533 {
74534 struct tvec_base *base = __get_cpu_var(tvec_bases);
74535
74536diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
74537index d9d6206..f19467e 100644
74538--- a/kernel/trace/blktrace.c
74539+++ b/kernel/trace/blktrace.c
74540@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
74541 struct blk_trace *bt = filp->private_data;
74542 char buf[16];
74543
74544- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
74545+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
74546
74547 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
74548 }
74549@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
74550 return 1;
74551
74552 bt = buf->chan->private_data;
74553- atomic_inc(&bt->dropped);
74554+ atomic_inc_unchecked(&bt->dropped);
74555 return 0;
74556 }
74557
74558@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
74559
74560 bt->dir = dir;
74561 bt->dev = dev;
74562- atomic_set(&bt->dropped, 0);
74563+ atomic_set_unchecked(&bt->dropped, 0);
74564
74565 ret = -EIO;
74566 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
74567diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
74568index 4872937..c794d40 100644
74569--- a/kernel/trace/ftrace.c
74570+++ b/kernel/trace/ftrace.c
74571@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
74572
74573 ip = rec->ip;
74574
74575+ ret = ftrace_arch_code_modify_prepare();
74576+ FTRACE_WARN_ON(ret);
74577+ if (ret)
74578+ return 0;
74579+
74580 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
74581+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
74582 if (ret) {
74583 ftrace_bug(ret, ip);
74584 rec->flags |= FTRACE_FL_FAILED;
74585- return 0;
74586 }
74587- return 1;
74588+ return ret ? 0 : 1;
74589 }
74590
74591 /*
74592diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
74593index e749a05..19c6e94 100644
74594--- a/kernel/trace/ring_buffer.c
74595+++ b/kernel/trace/ring_buffer.c
74596@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
74597 * the reader page). But if the next page is a header page,
74598 * its flags will be non zero.
74599 */
74600-static int inline
74601+static inline int
74602 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
74603 struct buffer_page *page, struct list_head *list)
74604 {
74605diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
74606index a2a2d1f..7f32b09 100644
74607--- a/kernel/trace/trace.c
74608+++ b/kernel/trace/trace.c
74609@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
74610 size_t rem;
74611 unsigned int i;
74612
74613+ pax_track_stack();
74614+
74615 /* copy the tracer to avoid using a global lock all around */
74616 mutex_lock(&trace_types_lock);
74617 if (unlikely(old_tracer != current_trace && current_trace)) {
74618@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
74619 int entries, size, i;
74620 size_t ret;
74621
74622+ pax_track_stack();
74623+
74624 if (*ppos & (PAGE_SIZE - 1)) {
74625 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
74626 return -EINVAL;
74627@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
74628 };
74629 #endif
74630
74631-static struct dentry *d_tracer;
74632-
74633 struct dentry *tracing_init_dentry(void)
74634 {
74635+ static struct dentry *d_tracer;
74636 static int once;
74637
74638 if (d_tracer)
74639@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
74640 return d_tracer;
74641 }
74642
74643-static struct dentry *d_percpu;
74644-
74645 struct dentry *tracing_dentry_percpu(void)
74646 {
74647+ static struct dentry *d_percpu;
74648 static int once;
74649 struct dentry *d_tracer;
74650
74651diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
74652index d128f65..f37b4af 100644
74653--- a/kernel/trace/trace_events.c
74654+++ b/kernel/trace/trace_events.c
74655@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
74656 * Modules must own their file_operations to keep up with
74657 * reference counting.
74658 */
74659+
74660 struct ftrace_module_file_ops {
74661 struct list_head list;
74662 struct module *mod;
74663- struct file_operations id;
74664- struct file_operations enable;
74665- struct file_operations format;
74666- struct file_operations filter;
74667 };
74668
74669 static void remove_subsystem_dir(const char *name)
74670@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
74671
74672 file_ops->mod = mod;
74673
74674- file_ops->id = ftrace_event_id_fops;
74675- file_ops->id.owner = mod;
74676-
74677- file_ops->enable = ftrace_enable_fops;
74678- file_ops->enable.owner = mod;
74679-
74680- file_ops->filter = ftrace_event_filter_fops;
74681- file_ops->filter.owner = mod;
74682-
74683- file_ops->format = ftrace_event_format_fops;
74684- file_ops->format.owner = mod;
74685+ pax_open_kernel();
74686+ *(void **)&mod->trace_id.owner = mod;
74687+ *(void **)&mod->trace_enable.owner = mod;
74688+ *(void **)&mod->trace_filter.owner = mod;
74689+ *(void **)&mod->trace_format.owner = mod;
74690+ pax_close_kernel();
74691
74692 list_add(&file_ops->list, &ftrace_module_file_list);
74693
74694@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
74695 call->mod = mod;
74696 list_add(&call->list, &ftrace_events);
74697 event_create_dir(call, d_events,
74698- &file_ops->id, &file_ops->enable,
74699- &file_ops->filter, &file_ops->format);
74700+ &mod->trace_id, &mod->trace_enable,
74701+ &mod->trace_filter, &mod->trace_format);
74702 }
74703 }
74704
74705diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
74706index 0acd834..b800b56 100644
74707--- a/kernel/trace/trace_mmiotrace.c
74708+++ b/kernel/trace/trace_mmiotrace.c
74709@@ -23,7 +23,7 @@ struct header_iter {
74710 static struct trace_array *mmio_trace_array;
74711 static bool overrun_detected;
74712 static unsigned long prev_overruns;
74713-static atomic_t dropped_count;
74714+static atomic_unchecked_t dropped_count;
74715
74716 static void mmio_reset_data(struct trace_array *tr)
74717 {
74718@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
74719
74720 static unsigned long count_overruns(struct trace_iterator *iter)
74721 {
74722- unsigned long cnt = atomic_xchg(&dropped_count, 0);
74723+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
74724 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
74725
74726 if (over > prev_overruns)
74727@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
74728 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
74729 sizeof(*entry), 0, pc);
74730 if (!event) {
74731- atomic_inc(&dropped_count);
74732+ atomic_inc_unchecked(&dropped_count);
74733 return;
74734 }
74735 entry = ring_buffer_event_data(event);
74736@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
74737 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
74738 sizeof(*entry), 0, pc);
74739 if (!event) {
74740- atomic_inc(&dropped_count);
74741+ atomic_inc_unchecked(&dropped_count);
74742 return;
74743 }
74744 entry = ring_buffer_event_data(event);
74745diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
74746index b6c12c6..41fdc53 100644
74747--- a/kernel/trace/trace_output.c
74748+++ b/kernel/trace/trace_output.c
74749@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
74750 return 0;
74751 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
74752 if (!IS_ERR(p)) {
74753- p = mangle_path(s->buffer + s->len, p, "\n");
74754+ p = mangle_path(s->buffer + s->len, p, "\n\\");
74755 if (p) {
74756 s->len = p - s->buffer;
74757 return 1;
74758diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
74759index 8504ac7..ecf0adb 100644
74760--- a/kernel/trace/trace_stack.c
74761+++ b/kernel/trace/trace_stack.c
74762@@ -50,7 +50,7 @@ static inline void check_stack(void)
74763 return;
74764
74765 /* we do not handle interrupt stacks yet */
74766- if (!object_is_on_stack(&this_size))
74767+ if (!object_starts_on_stack(&this_size))
74768 return;
74769
74770 local_irq_save(flags);
74771diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
74772index 40cafb0..d5ead43 100644
74773--- a/kernel/trace/trace_workqueue.c
74774+++ b/kernel/trace/trace_workqueue.c
74775@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
74776 int cpu;
74777 pid_t pid;
74778 /* Can be inserted from interrupt or user context, need to be atomic */
74779- atomic_t inserted;
74780+ atomic_unchecked_t inserted;
74781 /*
74782 * Don't need to be atomic, works are serialized in a single workqueue thread
74783 * on a single CPU.
74784@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
74785 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
74786 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
74787 if (node->pid == wq_thread->pid) {
74788- atomic_inc(&node->inserted);
74789+ atomic_inc_unchecked(&node->inserted);
74790 goto found;
74791 }
74792 }
74793@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
74794 tsk = get_pid_task(pid, PIDTYPE_PID);
74795 if (tsk) {
74796 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
74797- atomic_read(&cws->inserted), cws->executed,
74798+ atomic_read_unchecked(&cws->inserted), cws->executed,
74799 tsk->comm);
74800 put_task_struct(tsk);
74801 }
74802diff --git a/kernel/user.c b/kernel/user.c
74803index 1b91701..8795237 100644
74804--- a/kernel/user.c
74805+++ b/kernel/user.c
74806@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
74807 spin_lock_irq(&uidhash_lock);
74808 up = uid_hash_find(uid, hashent);
74809 if (up) {
74810+ put_user_ns(ns);
74811 key_put(new->uid_keyring);
74812 key_put(new->session_keyring);
74813 kmem_cache_free(uid_cachep, new);
74814diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
74815index 234ceb1..ad74049 100644
74816--- a/lib/Kconfig.debug
74817+++ b/lib/Kconfig.debug
74818@@ -905,7 +905,7 @@ config LATENCYTOP
74819 select STACKTRACE
74820 select SCHEDSTATS
74821 select SCHED_DEBUG
74822- depends on HAVE_LATENCYTOP_SUPPORT
74823+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
74824 help
74825 Enable this option if you want to use the LatencyTOP tool
74826 to find out which userspace is blocking on what kernel operations.
74827diff --git a/lib/bitmap.c b/lib/bitmap.c
74828index 7025658..8d14cab 100644
74829--- a/lib/bitmap.c
74830+++ b/lib/bitmap.c
74831@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
74832 {
74833 int c, old_c, totaldigits, ndigits, nchunks, nbits;
74834 u32 chunk;
74835- const char __user *ubuf = buf;
74836+ const char __user *ubuf = (const char __force_user *)buf;
74837
74838 bitmap_zero(maskp, nmaskbits);
74839
74840@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
74841 {
74842 if (!access_ok(VERIFY_READ, ubuf, ulen))
74843 return -EFAULT;
74844- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
74845+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
74846 }
74847 EXPORT_SYMBOL(bitmap_parse_user);
74848
74849diff --git a/lib/bug.c b/lib/bug.c
74850index 300e41a..2779eb0 100644
74851--- a/lib/bug.c
74852+++ b/lib/bug.c
74853@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
74854 return BUG_TRAP_TYPE_NONE;
74855
74856 bug = find_bug(bugaddr);
74857+ if (!bug)
74858+ return BUG_TRAP_TYPE_NONE;
74859
74860 printk(KERN_EMERG "------------[ cut here ]------------\n");
74861
74862diff --git a/lib/debugobjects.c b/lib/debugobjects.c
74863index 2b413db..e21d207 100644
74864--- a/lib/debugobjects.c
74865+++ b/lib/debugobjects.c
74866@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
74867 if (limit > 4)
74868 return;
74869
74870- is_on_stack = object_is_on_stack(addr);
74871+ is_on_stack = object_starts_on_stack(addr);
74872 if (is_on_stack == onstack)
74873 return;
74874
74875diff --git a/lib/devres.c b/lib/devres.c
74876index 72c8909..7543868 100644
74877--- a/lib/devres.c
74878+++ b/lib/devres.c
74879@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
74880 {
74881 iounmap(addr);
74882 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
74883- (void *)addr));
74884+ (void __force *)addr));
74885 }
74886 EXPORT_SYMBOL(devm_iounmap);
74887
74888@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
74889 {
74890 ioport_unmap(addr);
74891 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
74892- devm_ioport_map_match, (void *)addr));
74893+ devm_ioport_map_match, (void __force *)addr));
74894 }
74895 EXPORT_SYMBOL(devm_ioport_unmap);
74896
74897diff --git a/lib/dma-debug.c b/lib/dma-debug.c
74898index 084e879..0674448 100644
74899--- a/lib/dma-debug.c
74900+++ b/lib/dma-debug.c
74901@@ -861,7 +861,7 @@ out:
74902
74903 static void check_for_stack(struct device *dev, void *addr)
74904 {
74905- if (object_is_on_stack(addr))
74906+ if (object_starts_on_stack(addr))
74907 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
74908 "stack [addr=%p]\n", addr);
74909 }
74910diff --git a/lib/idr.c b/lib/idr.c
74911index eda7ba3..915dfae 100644
74912--- a/lib/idr.c
74913+++ b/lib/idr.c
74914@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
74915 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
74916
74917 /* if already at the top layer, we need to grow */
74918- if (id >= 1 << (idp->layers * IDR_BITS)) {
74919+ if (id >= (1 << (idp->layers * IDR_BITS))) {
74920 *starting_id = id;
74921 return IDR_NEED_TO_GROW;
74922 }
74923diff --git a/lib/inflate.c b/lib/inflate.c
74924index d102559..4215f31 100644
74925--- a/lib/inflate.c
74926+++ b/lib/inflate.c
74927@@ -266,7 +266,7 @@ static void free(void *where)
74928 malloc_ptr = free_mem_ptr;
74929 }
74930 #else
74931-#define malloc(a) kmalloc(a, GFP_KERNEL)
74932+#define malloc(a) kmalloc((a), GFP_KERNEL)
74933 #define free(a) kfree(a)
74934 #endif
74935
74936diff --git a/lib/kobject.c b/lib/kobject.c
74937index b512b74..8115eb1 100644
74938--- a/lib/kobject.c
74939+++ b/lib/kobject.c
74940@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
74941 return ret;
74942 }
74943
74944-struct sysfs_ops kobj_sysfs_ops = {
74945+const struct sysfs_ops kobj_sysfs_ops = {
74946 .show = kobj_attr_show,
74947 .store = kobj_attr_store,
74948 };
74949@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
74950 * If the kset was not able to be created, NULL will be returned.
74951 */
74952 static struct kset *kset_create(const char *name,
74953- struct kset_uevent_ops *uevent_ops,
74954+ const struct kset_uevent_ops *uevent_ops,
74955 struct kobject *parent_kobj)
74956 {
74957 struct kset *kset;
74958@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
74959 * If the kset was not able to be created, NULL will be returned.
74960 */
74961 struct kset *kset_create_and_add(const char *name,
74962- struct kset_uevent_ops *uevent_ops,
74963+ const struct kset_uevent_ops *uevent_ops,
74964 struct kobject *parent_kobj)
74965 {
74966 struct kset *kset;
74967diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
74968index 507b821..0bf8ed0 100644
74969--- a/lib/kobject_uevent.c
74970+++ b/lib/kobject_uevent.c
74971@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
74972 const char *subsystem;
74973 struct kobject *top_kobj;
74974 struct kset *kset;
74975- struct kset_uevent_ops *uevent_ops;
74976+ const struct kset_uevent_ops *uevent_ops;
74977 u64 seq;
74978 int i = 0;
74979 int retval = 0;
74980diff --git a/lib/kref.c b/lib/kref.c
74981index 9ecd6e8..12c94c1 100644
74982--- a/lib/kref.c
74983+++ b/lib/kref.c
74984@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
74985 */
74986 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
74987 {
74988- WARN_ON(release == NULL);
74989+ BUG_ON(release == NULL);
74990 WARN_ON(release == (void (*)(struct kref *))kfree);
74991
74992 if (atomic_dec_and_test(&kref->refcount)) {
74993diff --git a/lib/parser.c b/lib/parser.c
74994index b00d020..1b34325 100644
74995--- a/lib/parser.c
74996+++ b/lib/parser.c
74997@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
74998 char *buf;
74999 int ret;
75000
75001- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
75002+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
75003 if (!buf)
75004 return -ENOMEM;
75005 memcpy(buf, s->from, s->to - s->from);
75006diff --git a/lib/radix-tree.c b/lib/radix-tree.c
75007index 92cdd99..a8149d7 100644
75008--- a/lib/radix-tree.c
75009+++ b/lib/radix-tree.c
75010@@ -81,7 +81,7 @@ struct radix_tree_preload {
75011 int nr;
75012 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
75013 };
75014-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
75015+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
75016
75017 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
75018 {
75019diff --git a/lib/random32.c b/lib/random32.c
75020index 217d5c4..45aba8a 100644
75021--- a/lib/random32.c
75022+++ b/lib/random32.c
75023@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
75024 */
75025 static inline u32 __seed(u32 x, u32 m)
75026 {
75027- return (x < m) ? x + m : x;
75028+ return (x <= m) ? x + m + 1 : x;
75029 }
75030
75031 /**
75032diff --git a/lib/vsprintf.c b/lib/vsprintf.c
75033index 33bed5e..1477e46 100644
75034--- a/lib/vsprintf.c
75035+++ b/lib/vsprintf.c
75036@@ -16,6 +16,9 @@
75037 * - scnprintf and vscnprintf
75038 */
75039
75040+#ifdef CONFIG_GRKERNSEC_HIDESYM
75041+#define __INCLUDED_BY_HIDESYM 1
75042+#endif
75043 #include <stdarg.h>
75044 #include <linux/module.h>
75045 #include <linux/types.h>
75046@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
75047 return buf;
75048 }
75049
75050-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
75051+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
75052 {
75053 int len, i;
75054
75055 if ((unsigned long)s < PAGE_SIZE)
75056- s = "<NULL>";
75057+ s = "(null)";
75058
75059 len = strnlen(s, spec.precision);
75060
75061@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
75062 unsigned long value = (unsigned long) ptr;
75063 #ifdef CONFIG_KALLSYMS
75064 char sym[KSYM_SYMBOL_LEN];
75065- if (ext != 'f' && ext != 's')
75066+ if (ext != 'f' && ext != 's' && ext != 'a')
75067 sprint_symbol(sym, value);
75068 else
75069 kallsyms_lookup(value, NULL, NULL, NULL, sym);
75070@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
75071 * - 'f' For simple symbolic function names without offset
75072 * - 'S' For symbolic direct pointers with offset
75073 * - 's' For symbolic direct pointers without offset
75074+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
75075+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
75076 * - 'R' For a struct resource pointer, it prints the range of
75077 * addresses (not the name nor the flags)
75078 * - 'M' For a 6-byte MAC address, it prints the address in the
75079@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75080 struct printf_spec spec)
75081 {
75082 if (!ptr)
75083- return string(buf, end, "(null)", spec);
75084+ return string(buf, end, "(nil)", spec);
75085
75086 switch (*fmt) {
75087 case 'F':
75088@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75089 case 's':
75090 /* Fallthrough */
75091 case 'S':
75092+#ifdef CONFIG_GRKERNSEC_HIDESYM
75093+ break;
75094+#else
75095+ return symbol_string(buf, end, ptr, spec, *fmt);
75096+#endif
75097+ case 'a':
75098+ /* Fallthrough */
75099+ case 'A':
75100 return symbol_string(buf, end, ptr, spec, *fmt);
75101 case 'R':
75102 return resource_string(buf, end, ptr, spec);
75103@@ -1445,7 +1458,7 @@ do { \
75104 size_t len;
75105 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
75106 || (unsigned long)save_str < PAGE_SIZE)
75107- save_str = "<NULL>";
75108+ save_str = "(null)";
75109 len = strlen(save_str);
75110 if (str + len + 1 < end)
75111 memcpy(str, save_str, len + 1);
75112@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75113 typeof(type) value; \
75114 if (sizeof(type) == 8) { \
75115 args = PTR_ALIGN(args, sizeof(u32)); \
75116- *(u32 *)&value = *(u32 *)args; \
75117- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
75118+ *(u32 *)&value = *(const u32 *)args; \
75119+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
75120 } else { \
75121 args = PTR_ALIGN(args, sizeof(type)); \
75122- value = *(typeof(type) *)args; \
75123+ value = *(const typeof(type) *)args; \
75124 } \
75125 args += sizeof(type); \
75126 value; \
75127@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75128 const char *str_arg = args;
75129 size_t len = strlen(str_arg);
75130 args += len + 1;
75131- str = string(str, end, (char *)str_arg, spec);
75132+ str = string(str, end, str_arg, spec);
75133 break;
75134 }
75135
75136diff --git a/localversion-grsec b/localversion-grsec
75137new file mode 100644
75138index 0000000..7cd6065
75139--- /dev/null
75140+++ b/localversion-grsec
75141@@ -0,0 +1 @@
75142+-grsec
75143diff --git a/mm/Kconfig b/mm/Kconfig
75144index 2c19c0b..f3c3f83 100644
75145--- a/mm/Kconfig
75146+++ b/mm/Kconfig
75147@@ -228,7 +228,7 @@ config KSM
75148 config DEFAULT_MMAP_MIN_ADDR
75149 int "Low address space to protect from user allocation"
75150 depends on MMU
75151- default 4096
75152+ default 65536
75153 help
75154 This is the portion of low virtual memory which should be protected
75155 from userspace allocation. Keeping a user from writing to low pages
75156diff --git a/mm/backing-dev.c b/mm/backing-dev.c
75157index 67a33a5..094dcf1 100644
75158--- a/mm/backing-dev.c
75159+++ b/mm/backing-dev.c
75160@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
75161 list_add_tail_rcu(&wb->list, &bdi->wb_list);
75162 spin_unlock(&bdi->wb_lock);
75163
75164- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
75165+ tsk->flags |= PF_SWAPWRITE;
75166 set_freezable();
75167
75168 /*
75169@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
75170 * Add the default flusher task that gets created for any bdi
75171 * that has dirty data pending writeout
75172 */
75173-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
75174+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
75175 {
75176 if (!bdi_cap_writeback_dirty(bdi))
75177 return;
75178diff --git a/mm/filemap.c b/mm/filemap.c
75179index 9e0826e..4ee8f13 100644
75180--- a/mm/filemap.c
75181+++ b/mm/filemap.c
75182@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
75183 struct address_space *mapping = file->f_mapping;
75184
75185 if (!mapping->a_ops->readpage)
75186- return -ENOEXEC;
75187+ return -ENODEV;
75188 file_accessed(file);
75189 vma->vm_ops = &generic_file_vm_ops;
75190 vma->vm_flags |= VM_CAN_NONLINEAR;
75191@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
75192 *pos = i_size_read(inode);
75193
75194 if (limit != RLIM_INFINITY) {
75195+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
75196 if (*pos >= limit) {
75197 send_sig(SIGXFSZ, current, 0);
75198 return -EFBIG;
75199diff --git a/mm/fremap.c b/mm/fremap.c
75200index b6ec85a..a24ac22 100644
75201--- a/mm/fremap.c
75202+++ b/mm/fremap.c
75203@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75204 retry:
75205 vma = find_vma(mm, start);
75206
75207+#ifdef CONFIG_PAX_SEGMEXEC
75208+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
75209+ goto out;
75210+#endif
75211+
75212 /*
75213 * Make sure the vma is shared, that it supports prefaulting,
75214 * and that the remapped range is valid and fully within
75215@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75216 /*
75217 * drop PG_Mlocked flag for over-mapped range
75218 */
75219- unsigned int saved_flags = vma->vm_flags;
75220+ unsigned long saved_flags = vma->vm_flags;
75221 munlock_vma_pages_range(vma, start, start + size);
75222 vma->vm_flags = saved_flags;
75223 }
75224diff --git a/mm/highmem.c b/mm/highmem.c
75225index 9c1e627..5ca9447 100644
75226--- a/mm/highmem.c
75227+++ b/mm/highmem.c
75228@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
75229 * So no dangers, even with speculative execution.
75230 */
75231 page = pte_page(pkmap_page_table[i]);
75232+ pax_open_kernel();
75233 pte_clear(&init_mm, (unsigned long)page_address(page),
75234 &pkmap_page_table[i]);
75235-
75236+ pax_close_kernel();
75237 set_page_address(page, NULL);
75238 need_flush = 1;
75239 }
75240@@ -177,9 +178,11 @@ start:
75241 }
75242 }
75243 vaddr = PKMAP_ADDR(last_pkmap_nr);
75244+
75245+ pax_open_kernel();
75246 set_pte_at(&init_mm, vaddr,
75247 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
75248-
75249+ pax_close_kernel();
75250 pkmap_count[last_pkmap_nr] = 1;
75251 set_page_address(page, (void *)vaddr);
75252
75253diff --git a/mm/hugetlb.c b/mm/hugetlb.c
75254index 5e1e508..9f0ebad 100644
75255--- a/mm/hugetlb.c
75256+++ b/mm/hugetlb.c
75257@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
75258 return 1;
75259 }
75260
75261+#ifdef CONFIG_PAX_SEGMEXEC
75262+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
75263+{
75264+ struct mm_struct *mm = vma->vm_mm;
75265+ struct vm_area_struct *vma_m;
75266+ unsigned long address_m;
75267+ pte_t *ptep_m;
75268+
75269+ vma_m = pax_find_mirror_vma(vma);
75270+ if (!vma_m)
75271+ return;
75272+
75273+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75274+ address_m = address + SEGMEXEC_TASK_SIZE;
75275+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
75276+ get_page(page_m);
75277+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
75278+}
75279+#endif
75280+
75281 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
75282 unsigned long address, pte_t *ptep, pte_t pte,
75283 struct page *pagecache_page)
75284@@ -2004,6 +2024,11 @@ retry_avoidcopy:
75285 huge_ptep_clear_flush(vma, address, ptep);
75286 set_huge_pte_at(mm, address, ptep,
75287 make_huge_pte(vma, new_page, 1));
75288+
75289+#ifdef CONFIG_PAX_SEGMEXEC
75290+ pax_mirror_huge_pte(vma, address, new_page);
75291+#endif
75292+
75293 /* Make the old page be freed below */
75294 new_page = old_page;
75295 }
75296@@ -2135,6 +2160,10 @@ retry:
75297 && (vma->vm_flags & VM_SHARED)));
75298 set_huge_pte_at(mm, address, ptep, new_pte);
75299
75300+#ifdef CONFIG_PAX_SEGMEXEC
75301+ pax_mirror_huge_pte(vma, address, page);
75302+#endif
75303+
75304 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
75305 /* Optimization, do the COW without a second fault */
75306 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
75307@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75308 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
75309 struct hstate *h = hstate_vma(vma);
75310
75311+#ifdef CONFIG_PAX_SEGMEXEC
75312+ struct vm_area_struct *vma_m;
75313+
75314+ vma_m = pax_find_mirror_vma(vma);
75315+ if (vma_m) {
75316+ unsigned long address_m;
75317+
75318+ if (vma->vm_start > vma_m->vm_start) {
75319+ address_m = address;
75320+ address -= SEGMEXEC_TASK_SIZE;
75321+ vma = vma_m;
75322+ h = hstate_vma(vma);
75323+ } else
75324+ address_m = address + SEGMEXEC_TASK_SIZE;
75325+
75326+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
75327+ return VM_FAULT_OOM;
75328+ address_m &= HPAGE_MASK;
75329+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
75330+ }
75331+#endif
75332+
75333 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
75334 if (!ptep)
75335 return VM_FAULT_OOM;
75336diff --git a/mm/internal.h b/mm/internal.h
75337index f03e8e2..7354343 100644
75338--- a/mm/internal.h
75339+++ b/mm/internal.h
75340@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
75341 * in mm/page_alloc.c
75342 */
75343 extern void __free_pages_bootmem(struct page *page, unsigned int order);
75344+extern void free_compound_page(struct page *page);
75345 extern void prep_compound_page(struct page *page, unsigned long order);
75346
75347
75348diff --git a/mm/kmemleak.c b/mm/kmemleak.c
75349index c346660..b47382f 100644
75350--- a/mm/kmemleak.c
75351+++ b/mm/kmemleak.c
75352@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
75353
75354 for (i = 0; i < object->trace_len; i++) {
75355 void *ptr = (void *)object->trace[i];
75356- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
75357+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
75358 }
75359 }
75360
75361diff --git a/mm/maccess.c b/mm/maccess.c
75362index 9073695..1127f348 100644
75363--- a/mm/maccess.c
75364+++ b/mm/maccess.c
75365@@ -14,7 +14,7 @@
75366 * Safely read from address @src to the buffer at @dst. If a kernel fault
75367 * happens, handle that and return -EFAULT.
75368 */
75369-long probe_kernel_read(void *dst, void *src, size_t size)
75370+long probe_kernel_read(void *dst, const void *src, size_t size)
75371 {
75372 long ret;
75373 mm_segment_t old_fs = get_fs();
75374@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
75375 set_fs(KERNEL_DS);
75376 pagefault_disable();
75377 ret = __copy_from_user_inatomic(dst,
75378- (__force const void __user *)src, size);
75379+ (const void __force_user *)src, size);
75380 pagefault_enable();
75381 set_fs(old_fs);
75382
75383@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
75384 * Safely write to address @dst from the buffer at @src. If a kernel fault
75385 * happens, handle that and return -EFAULT.
75386 */
75387-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
75388+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
75389 {
75390 long ret;
75391 mm_segment_t old_fs = get_fs();
75392
75393 set_fs(KERNEL_DS);
75394 pagefault_disable();
75395- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
75396+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
75397 pagefault_enable();
75398 set_fs(old_fs);
75399
75400diff --git a/mm/madvise.c b/mm/madvise.c
75401index 35b1479..499f7d4 100644
75402--- a/mm/madvise.c
75403+++ b/mm/madvise.c
75404@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
75405 pgoff_t pgoff;
75406 unsigned long new_flags = vma->vm_flags;
75407
75408+#ifdef CONFIG_PAX_SEGMEXEC
75409+ struct vm_area_struct *vma_m;
75410+#endif
75411+
75412 switch (behavior) {
75413 case MADV_NORMAL:
75414 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
75415@@ -103,6 +107,13 @@ success:
75416 /*
75417 * vm_flags is protected by the mmap_sem held in write mode.
75418 */
75419+
75420+#ifdef CONFIG_PAX_SEGMEXEC
75421+ vma_m = pax_find_mirror_vma(vma);
75422+ if (vma_m)
75423+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
75424+#endif
75425+
75426 vma->vm_flags = new_flags;
75427
75428 out:
75429@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75430 struct vm_area_struct ** prev,
75431 unsigned long start, unsigned long end)
75432 {
75433+
75434+#ifdef CONFIG_PAX_SEGMEXEC
75435+ struct vm_area_struct *vma_m;
75436+#endif
75437+
75438 *prev = vma;
75439 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
75440 return -EINVAL;
75441@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75442 zap_page_range(vma, start, end - start, &details);
75443 } else
75444 zap_page_range(vma, start, end - start, NULL);
75445+
75446+#ifdef CONFIG_PAX_SEGMEXEC
75447+ vma_m = pax_find_mirror_vma(vma);
75448+ if (vma_m) {
75449+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
75450+ struct zap_details details = {
75451+ .nonlinear_vma = vma_m,
75452+ .last_index = ULONG_MAX,
75453+ };
75454+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
75455+ } else
75456+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
75457+ }
75458+#endif
75459+
75460 return 0;
75461 }
75462
75463@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
75464 if (end < start)
75465 goto out;
75466
75467+#ifdef CONFIG_PAX_SEGMEXEC
75468+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
75469+ if (end > SEGMEXEC_TASK_SIZE)
75470+ goto out;
75471+ } else
75472+#endif
75473+
75474+ if (end > TASK_SIZE)
75475+ goto out;
75476+
75477 error = 0;
75478 if (end == start)
75479 goto out;
75480diff --git a/mm/memory-failure.c b/mm/memory-failure.c
75481index 8aeba53..b4a4198 100644
75482--- a/mm/memory-failure.c
75483+++ b/mm/memory-failure.c
75484@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
75485
75486 int sysctl_memory_failure_recovery __read_mostly = 1;
75487
75488-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
75489+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
75490
75491 /*
75492 * Send all the processes who have the page mapped an ``action optional''
75493@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
75494 si.si_signo = SIGBUS;
75495 si.si_errno = 0;
75496 si.si_code = BUS_MCEERR_AO;
75497- si.si_addr = (void *)addr;
75498+ si.si_addr = (void __user *)addr;
75499 #ifdef __ARCH_SI_TRAPNO
75500 si.si_trapno = trapno;
75501 #endif
75502@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
75503 return 0;
75504 }
75505
75506- atomic_long_add(1, &mce_bad_pages);
75507+ atomic_long_add_unchecked(1, &mce_bad_pages);
75508
75509 /*
75510 * We need/can do nothing about count=0 pages.
75511diff --git a/mm/memory.c b/mm/memory.c
75512index 6c836d3..48f3264 100644
75513--- a/mm/memory.c
75514+++ b/mm/memory.c
75515@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
75516 return;
75517
75518 pmd = pmd_offset(pud, start);
75519+
75520+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
75521 pud_clear(pud);
75522 pmd_free_tlb(tlb, pmd, start);
75523+#endif
75524+
75525 }
75526
75527 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75528@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
75529 if (end - 1 > ceiling - 1)
75530 return;
75531
75532+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
75533 pud = pud_offset(pgd, start);
75534 pgd_clear(pgd);
75535 pud_free_tlb(tlb, pud, start);
75536+#endif
75537+
75538 }
75539
75540 /*
75541@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75542 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
75543 i = 0;
75544
75545- do {
75546+ while (nr_pages) {
75547 struct vm_area_struct *vma;
75548
75549- vma = find_extend_vma(mm, start);
75550+ vma = find_vma(mm, start);
75551 if (!vma && in_gate_area(tsk, start)) {
75552 unsigned long pg = start & PAGE_MASK;
75553 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
75554@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75555 continue;
75556 }
75557
75558- if (!vma ||
75559+ if (!vma || start < vma->vm_start ||
75560 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
75561 !(vm_flags & vma->vm_flags))
75562 return i ? : -EFAULT;
75563@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75564 start += PAGE_SIZE;
75565 nr_pages--;
75566 } while (nr_pages && start < vma->vm_end);
75567- } while (nr_pages);
75568+ }
75569 return i;
75570 }
75571
75572@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
75573 page_add_file_rmap(page);
75574 set_pte_at(mm, addr, pte, mk_pte(page, prot));
75575
75576+#ifdef CONFIG_PAX_SEGMEXEC
75577+ pax_mirror_file_pte(vma, addr, page, ptl);
75578+#endif
75579+
75580 retval = 0;
75581 pte_unmap_unlock(pte, ptl);
75582 return retval;
75583@@ -1560,10 +1571,22 @@ out:
75584 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
75585 struct page *page)
75586 {
75587+
75588+#ifdef CONFIG_PAX_SEGMEXEC
75589+ struct vm_area_struct *vma_m;
75590+#endif
75591+
75592 if (addr < vma->vm_start || addr >= vma->vm_end)
75593 return -EFAULT;
75594 if (!page_count(page))
75595 return -EINVAL;
75596+
75597+#ifdef CONFIG_PAX_SEGMEXEC
75598+ vma_m = pax_find_mirror_vma(vma);
75599+ if (vma_m)
75600+ vma_m->vm_flags |= VM_INSERTPAGE;
75601+#endif
75602+
75603 vma->vm_flags |= VM_INSERTPAGE;
75604 return insert_page(vma, addr, page, vma->vm_page_prot);
75605 }
75606@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
75607 unsigned long pfn)
75608 {
75609 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
75610+ BUG_ON(vma->vm_mirror);
75611
75612 if (addr < vma->vm_start || addr >= vma->vm_end)
75613 return -EFAULT;
75614@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
75615 copy_user_highpage(dst, src, va, vma);
75616 }
75617
75618+#ifdef CONFIG_PAX_SEGMEXEC
75619+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
75620+{
75621+ struct mm_struct *mm = vma->vm_mm;
75622+ spinlock_t *ptl;
75623+ pte_t *pte, entry;
75624+
75625+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
75626+ entry = *pte;
75627+ if (!pte_present(entry)) {
75628+ if (!pte_none(entry)) {
75629+ BUG_ON(pte_file(entry));
75630+ free_swap_and_cache(pte_to_swp_entry(entry));
75631+ pte_clear_not_present_full(mm, address, pte, 0);
75632+ }
75633+ } else {
75634+ struct page *page;
75635+
75636+ flush_cache_page(vma, address, pte_pfn(entry));
75637+ entry = ptep_clear_flush(vma, address, pte);
75638+ BUG_ON(pte_dirty(entry));
75639+ page = vm_normal_page(vma, address, entry);
75640+ if (page) {
75641+ update_hiwater_rss(mm);
75642+ if (PageAnon(page))
75643+ dec_mm_counter(mm, anon_rss);
75644+ else
75645+ dec_mm_counter(mm, file_rss);
75646+ page_remove_rmap(page);
75647+ page_cache_release(page);
75648+ }
75649+ }
75650+ pte_unmap_unlock(pte, ptl);
75651+}
75652+
75653+/* PaX: if vma is mirrored, synchronize the mirror's PTE
75654+ *
75655+ * the ptl of the lower mapped page is held on entry and is not released on exit
75656+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
75657+ */
75658+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
75659+{
75660+ struct mm_struct *mm = vma->vm_mm;
75661+ unsigned long address_m;
75662+ spinlock_t *ptl_m;
75663+ struct vm_area_struct *vma_m;
75664+ pmd_t *pmd_m;
75665+ pte_t *pte_m, entry_m;
75666+
75667+ BUG_ON(!page_m || !PageAnon(page_m));
75668+
75669+ vma_m = pax_find_mirror_vma(vma);
75670+ if (!vma_m)
75671+ return;
75672+
75673+ BUG_ON(!PageLocked(page_m));
75674+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75675+ address_m = address + SEGMEXEC_TASK_SIZE;
75676+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75677+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75678+ ptl_m = pte_lockptr(mm, pmd_m);
75679+ if (ptl != ptl_m) {
75680+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75681+ if (!pte_none(*pte_m))
75682+ goto out;
75683+ }
75684+
75685+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
75686+ page_cache_get(page_m);
75687+ page_add_anon_rmap(page_m, vma_m, address_m);
75688+ inc_mm_counter(mm, anon_rss);
75689+ set_pte_at(mm, address_m, pte_m, entry_m);
75690+ update_mmu_cache(vma_m, address_m, entry_m);
75691+out:
75692+ if (ptl != ptl_m)
75693+ spin_unlock(ptl_m);
75694+ pte_unmap_nested(pte_m);
75695+ unlock_page(page_m);
75696+}
75697+
75698+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
75699+{
75700+ struct mm_struct *mm = vma->vm_mm;
75701+ unsigned long address_m;
75702+ spinlock_t *ptl_m;
75703+ struct vm_area_struct *vma_m;
75704+ pmd_t *pmd_m;
75705+ pte_t *pte_m, entry_m;
75706+
75707+ BUG_ON(!page_m || PageAnon(page_m));
75708+
75709+ vma_m = pax_find_mirror_vma(vma);
75710+ if (!vma_m)
75711+ return;
75712+
75713+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75714+ address_m = address + SEGMEXEC_TASK_SIZE;
75715+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75716+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75717+ ptl_m = pte_lockptr(mm, pmd_m);
75718+ if (ptl != ptl_m) {
75719+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75720+ if (!pte_none(*pte_m))
75721+ goto out;
75722+ }
75723+
75724+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
75725+ page_cache_get(page_m);
75726+ page_add_file_rmap(page_m);
75727+ inc_mm_counter(mm, file_rss);
75728+ set_pte_at(mm, address_m, pte_m, entry_m);
75729+ update_mmu_cache(vma_m, address_m, entry_m);
75730+out:
75731+ if (ptl != ptl_m)
75732+ spin_unlock(ptl_m);
75733+ pte_unmap_nested(pte_m);
75734+}
75735+
75736+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
75737+{
75738+ struct mm_struct *mm = vma->vm_mm;
75739+ unsigned long address_m;
75740+ spinlock_t *ptl_m;
75741+ struct vm_area_struct *vma_m;
75742+ pmd_t *pmd_m;
75743+ pte_t *pte_m, entry_m;
75744+
75745+ vma_m = pax_find_mirror_vma(vma);
75746+ if (!vma_m)
75747+ return;
75748+
75749+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75750+ address_m = address + SEGMEXEC_TASK_SIZE;
75751+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
75752+ pte_m = pte_offset_map_nested(pmd_m, address_m);
75753+ ptl_m = pte_lockptr(mm, pmd_m);
75754+ if (ptl != ptl_m) {
75755+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
75756+ if (!pte_none(*pte_m))
75757+ goto out;
75758+ }
75759+
75760+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
75761+ set_pte_at(mm, address_m, pte_m, entry_m);
75762+out:
75763+ if (ptl != ptl_m)
75764+ spin_unlock(ptl_m);
75765+ pte_unmap_nested(pte_m);
75766+}
75767+
75768+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
75769+{
75770+ struct page *page_m;
75771+ pte_t entry;
75772+
75773+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
75774+ goto out;
75775+
75776+ entry = *pte;
75777+ page_m = vm_normal_page(vma, address, entry);
75778+ if (!page_m)
75779+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
75780+ else if (PageAnon(page_m)) {
75781+ if (pax_find_mirror_vma(vma)) {
75782+ pte_unmap_unlock(pte, ptl);
75783+ lock_page(page_m);
75784+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
75785+ if (pte_same(entry, *pte))
75786+ pax_mirror_anon_pte(vma, address, page_m, ptl);
75787+ else
75788+ unlock_page(page_m);
75789+ }
75790+ } else
75791+ pax_mirror_file_pte(vma, address, page_m, ptl);
75792+
75793+out:
75794+ pte_unmap_unlock(pte, ptl);
75795+}
75796+#endif
75797+
75798 /*
75799 * This routine handles present pages, when users try to write
75800 * to a shared page. It is done by copying the page to a new address
75801@@ -2156,6 +2360,12 @@ gotten:
75802 */
75803 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
75804 if (likely(pte_same(*page_table, orig_pte))) {
75805+
75806+#ifdef CONFIG_PAX_SEGMEXEC
75807+ if (pax_find_mirror_vma(vma))
75808+ BUG_ON(!trylock_page(new_page));
75809+#endif
75810+
75811 if (old_page) {
75812 if (!PageAnon(old_page)) {
75813 dec_mm_counter(mm, file_rss);
75814@@ -2207,6 +2417,10 @@ gotten:
75815 page_remove_rmap(old_page);
75816 }
75817
75818+#ifdef CONFIG_PAX_SEGMEXEC
75819+ pax_mirror_anon_pte(vma, address, new_page, ptl);
75820+#endif
75821+
75822 /* Free the old page.. */
75823 new_page = old_page;
75824 ret |= VM_FAULT_WRITE;
75825@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
75826 swap_free(entry);
75827 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
75828 try_to_free_swap(page);
75829+
75830+#ifdef CONFIG_PAX_SEGMEXEC
75831+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
75832+#endif
75833+
75834 unlock_page(page);
75835
75836 if (flags & FAULT_FLAG_WRITE) {
75837@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
75838
75839 /* No need to invalidate - it was non-present before */
75840 update_mmu_cache(vma, address, pte);
75841+
75842+#ifdef CONFIG_PAX_SEGMEXEC
75843+ pax_mirror_anon_pte(vma, address, page, ptl);
75844+#endif
75845+
75846 unlock:
75847 pte_unmap_unlock(page_table, ptl);
75848 out:
75849@@ -2632,40 +2856,6 @@ out_release:
75850 }
75851
75852 /*
75853- * This is like a special single-page "expand_{down|up}wards()",
75854- * except we must first make sure that 'address{-|+}PAGE_SIZE'
75855- * doesn't hit another vma.
75856- */
75857-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
75858-{
75859- address &= PAGE_MASK;
75860- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
75861- struct vm_area_struct *prev = vma->vm_prev;
75862-
75863- /*
75864- * Is there a mapping abutting this one below?
75865- *
75866- * That's only ok if it's the same stack mapping
75867- * that has gotten split..
75868- */
75869- if (prev && prev->vm_end == address)
75870- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
75871-
75872- expand_stack(vma, address - PAGE_SIZE);
75873- }
75874- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
75875- struct vm_area_struct *next = vma->vm_next;
75876-
75877- /* As VM_GROWSDOWN but s/below/above/ */
75878- if (next && next->vm_start == address + PAGE_SIZE)
75879- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
75880-
75881- expand_upwards(vma, address + PAGE_SIZE);
75882- }
75883- return 0;
75884-}
75885-
75886-/*
75887 * We enter with non-exclusive mmap_sem (to exclude vma changes,
75888 * but allow concurrent faults), and pte mapped but not yet locked.
75889 * We return with mmap_sem still held, but pte unmapped and unlocked.
75890@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
75891 unsigned long address, pte_t *page_table, pmd_t *pmd,
75892 unsigned int flags)
75893 {
75894- struct page *page;
75895+ struct page *page = NULL;
75896 spinlock_t *ptl;
75897 pte_t entry;
75898
75899- pte_unmap(page_table);
75900-
75901- /* Check if we need to add a guard page to the stack */
75902- if (check_stack_guard_page(vma, address) < 0)
75903- return VM_FAULT_SIGBUS;
75904-
75905- /* Use the zero-page for reads */
75906 if (!(flags & FAULT_FLAG_WRITE)) {
75907 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
75908 vma->vm_page_prot));
75909- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
75910+ ptl = pte_lockptr(mm, pmd);
75911+ spin_lock(ptl);
75912 if (!pte_none(*page_table))
75913 goto unlock;
75914 goto setpte;
75915 }
75916
75917 /* Allocate our own private page. */
75918+ pte_unmap(page_table);
75919+
75920 if (unlikely(anon_vma_prepare(vma)))
75921 goto oom;
75922 page = alloc_zeroed_user_highpage_movable(vma, address);
75923@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
75924 if (!pte_none(*page_table))
75925 goto release;
75926
75927+#ifdef CONFIG_PAX_SEGMEXEC
75928+ if (pax_find_mirror_vma(vma))
75929+ BUG_ON(!trylock_page(page));
75930+#endif
75931+
75932 inc_mm_counter(mm, anon_rss);
75933 page_add_new_anon_rmap(page, vma, address);
75934 setpte:
75935@@ -2720,6 +2911,12 @@ setpte:
75936
75937 /* No need to invalidate - it was non-present before */
75938 update_mmu_cache(vma, address, entry);
75939+
75940+#ifdef CONFIG_PAX_SEGMEXEC
75941+ if (page)
75942+ pax_mirror_anon_pte(vma, address, page, ptl);
75943+#endif
75944+
75945 unlock:
75946 pte_unmap_unlock(page_table, ptl);
75947 return 0;
75948@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75949 */
75950 /* Only go through if we didn't race with anybody else... */
75951 if (likely(pte_same(*page_table, orig_pte))) {
75952+
75953+#ifdef CONFIG_PAX_SEGMEXEC
75954+ if (anon && pax_find_mirror_vma(vma))
75955+ BUG_ON(!trylock_page(page));
75956+#endif
75957+
75958 flush_icache_page(vma, page);
75959 entry = mk_pte(page, vma->vm_page_prot);
75960 if (flags & FAULT_FLAG_WRITE)
75961@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75962
75963 /* no need to invalidate: a not-present page won't be cached */
75964 update_mmu_cache(vma, address, entry);
75965+
75966+#ifdef CONFIG_PAX_SEGMEXEC
75967+ if (anon)
75968+ pax_mirror_anon_pte(vma, address, page, ptl);
75969+ else
75970+ pax_mirror_file_pte(vma, address, page, ptl);
75971+#endif
75972+
75973 } else {
75974 if (charged)
75975 mem_cgroup_uncharge_page(page);
75976@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
75977 if (flags & FAULT_FLAG_WRITE)
75978 flush_tlb_page(vma, address);
75979 }
75980+
75981+#ifdef CONFIG_PAX_SEGMEXEC
75982+ pax_mirror_pte(vma, address, pte, pmd, ptl);
75983+ return 0;
75984+#endif
75985+
75986 unlock:
75987 pte_unmap_unlock(pte, ptl);
75988 return 0;
75989@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75990 pmd_t *pmd;
75991 pte_t *pte;
75992
75993+#ifdef CONFIG_PAX_SEGMEXEC
75994+ struct vm_area_struct *vma_m;
75995+#endif
75996+
75997 __set_current_state(TASK_RUNNING);
75998
75999 count_vm_event(PGFAULT);
76000@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76001 if (unlikely(is_vm_hugetlb_page(vma)))
76002 return hugetlb_fault(mm, vma, address, flags);
76003
76004+#ifdef CONFIG_PAX_SEGMEXEC
76005+ vma_m = pax_find_mirror_vma(vma);
76006+ if (vma_m) {
76007+ unsigned long address_m;
76008+ pgd_t *pgd_m;
76009+ pud_t *pud_m;
76010+ pmd_t *pmd_m;
76011+
76012+ if (vma->vm_start > vma_m->vm_start) {
76013+ address_m = address;
76014+ address -= SEGMEXEC_TASK_SIZE;
76015+ vma = vma_m;
76016+ } else
76017+ address_m = address + SEGMEXEC_TASK_SIZE;
76018+
76019+ pgd_m = pgd_offset(mm, address_m);
76020+ pud_m = pud_alloc(mm, pgd_m, address_m);
76021+ if (!pud_m)
76022+ return VM_FAULT_OOM;
76023+ pmd_m = pmd_alloc(mm, pud_m, address_m);
76024+ if (!pmd_m)
76025+ return VM_FAULT_OOM;
76026+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
76027+ return VM_FAULT_OOM;
76028+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
76029+ }
76030+#endif
76031+
76032 pgd = pgd_offset(mm, address);
76033 pud = pud_alloc(mm, pgd, address);
76034 if (!pud)
76035@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
76036 gate_vma.vm_start = FIXADDR_USER_START;
76037 gate_vma.vm_end = FIXADDR_USER_END;
76038 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
76039- gate_vma.vm_page_prot = __P101;
76040+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
76041 /*
76042 * Make sure the vDSO gets into every core dump.
76043 * Dumping its contents makes post-mortem fully interpretable later
76044diff --git a/mm/mempolicy.c b/mm/mempolicy.c
76045index 3c6e3e2..ad9871c 100644
76046--- a/mm/mempolicy.c
76047+++ b/mm/mempolicy.c
76048@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76049 struct vm_area_struct *next;
76050 int err;
76051
76052+#ifdef CONFIG_PAX_SEGMEXEC
76053+ struct vm_area_struct *vma_m;
76054+#endif
76055+
76056 err = 0;
76057 for (; vma && vma->vm_start < end; vma = next) {
76058 next = vma->vm_next;
76059@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76060 err = policy_vma(vma, new);
76061 if (err)
76062 break;
76063+
76064+#ifdef CONFIG_PAX_SEGMEXEC
76065+ vma_m = pax_find_mirror_vma(vma);
76066+ if (vma_m) {
76067+ err = policy_vma(vma_m, new);
76068+ if (err)
76069+ break;
76070+ }
76071+#endif
76072+
76073 }
76074 return err;
76075 }
76076@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
76077
76078 if (end < start)
76079 return -EINVAL;
76080+
76081+#ifdef CONFIG_PAX_SEGMEXEC
76082+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
76083+ if (end > SEGMEXEC_TASK_SIZE)
76084+ return -EINVAL;
76085+ } else
76086+#endif
76087+
76088+ if (end > TASK_SIZE)
76089+ return -EINVAL;
76090+
76091 if (end == start)
76092 return 0;
76093
76094@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76095 if (!mm)
76096 return -EINVAL;
76097
76098+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76099+ if (mm != current->mm &&
76100+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76101+ err = -EPERM;
76102+ goto out;
76103+ }
76104+#endif
76105+
76106 /*
76107 * Check if this process has the right to modify the specified
76108 * process. The right exists if the process has administrative
76109@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76110 rcu_read_lock();
76111 tcred = __task_cred(task);
76112 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76113- cred->uid != tcred->suid && cred->uid != tcred->uid &&
76114- !capable(CAP_SYS_NICE)) {
76115+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76116 rcu_read_unlock();
76117 err = -EPERM;
76118 goto out;
76119@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, void *v)
76120
76121 if (file) {
76122 seq_printf(m, " file=");
76123- seq_path(m, &file->f_path, "\n\t= ");
76124+ seq_path(m, &file->f_path, "\n\t\\= ");
76125 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
76126 seq_printf(m, " heap");
76127 } else if (vma->vm_start <= mm->start_stack &&
76128diff --git a/mm/migrate.c b/mm/migrate.c
76129index aaca868..2ebecdc 100644
76130--- a/mm/migrate.c
76131+++ b/mm/migrate.c
76132@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
76133 unsigned long chunk_start;
76134 int err;
76135
76136+ pax_track_stack();
76137+
76138 task_nodes = cpuset_mems_allowed(task);
76139
76140 err = -ENOMEM;
76141@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76142 if (!mm)
76143 return -EINVAL;
76144
76145+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76146+ if (mm != current->mm &&
76147+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76148+ err = -EPERM;
76149+ goto out;
76150+ }
76151+#endif
76152+
76153 /*
76154 * Check if this process has the right to modify the specified
76155 * process. The right exists if the process has administrative
76156@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76157 rcu_read_lock();
76158 tcred = __task_cred(task);
76159 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76160- cred->uid != tcred->suid && cred->uid != tcred->uid &&
76161- !capable(CAP_SYS_NICE)) {
76162+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76163 rcu_read_unlock();
76164 err = -EPERM;
76165 goto out;
76166diff --git a/mm/mlock.c b/mm/mlock.c
76167index 2d846cf..98134d2 100644
76168--- a/mm/mlock.c
76169+++ b/mm/mlock.c
76170@@ -13,6 +13,7 @@
76171 #include <linux/pagemap.h>
76172 #include <linux/mempolicy.h>
76173 #include <linux/syscalls.h>
76174+#include <linux/security.h>
76175 #include <linux/sched.h>
76176 #include <linux/module.h>
76177 #include <linux/rmap.h>
76178@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
76179 }
76180 }
76181
76182-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
76183-{
76184- return (vma->vm_flags & VM_GROWSDOWN) &&
76185- (vma->vm_start == addr) &&
76186- !vma_stack_continue(vma->vm_prev, addr);
76187-}
76188-
76189 /**
76190 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
76191 * @vma: target vma
76192@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
76193 if (vma->vm_flags & VM_WRITE)
76194 gup_flags |= FOLL_WRITE;
76195
76196- /* We don't try to access the guard page of a stack vma */
76197- if (stack_guard_page(vma, start)) {
76198- addr += PAGE_SIZE;
76199- nr_pages--;
76200- }
76201-
76202 while (nr_pages > 0) {
76203 int i;
76204
76205@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
76206 {
76207 unsigned long nstart, end, tmp;
76208 struct vm_area_struct * vma, * prev;
76209- int error;
76210+ int error = -EINVAL;
76211
76212 len = PAGE_ALIGN(len);
76213 end = start + len;
76214@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
76215 return -EINVAL;
76216 if (end == start)
76217 return 0;
76218+ if (end > TASK_SIZE)
76219+ return -EINVAL;
76220+
76221 vma = find_vma_prev(current->mm, start, &prev);
76222 if (!vma || vma->vm_start > start)
76223 return -ENOMEM;
76224@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
76225 for (nstart = start ; ; ) {
76226 unsigned int newflags;
76227
76228+#ifdef CONFIG_PAX_SEGMEXEC
76229+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76230+ break;
76231+#endif
76232+
76233 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
76234
76235 newflags = vma->vm_flags | VM_LOCKED;
76236@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
76237 lock_limit >>= PAGE_SHIFT;
76238
76239 /* check against resource limits */
76240+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
76241 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
76242 error = do_mlock(start, len, 1);
76243 up_write(&current->mm->mmap_sem);
76244@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
76245 static int do_mlockall(int flags)
76246 {
76247 struct vm_area_struct * vma, * prev = NULL;
76248- unsigned int def_flags = 0;
76249
76250 if (flags & MCL_FUTURE)
76251- def_flags = VM_LOCKED;
76252- current->mm->def_flags = def_flags;
76253+ current->mm->def_flags |= VM_LOCKED;
76254+ else
76255+ current->mm->def_flags &= ~VM_LOCKED;
76256 if (flags == MCL_FUTURE)
76257 goto out;
76258
76259 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
76260- unsigned int newflags;
76261+ unsigned long newflags;
76262+
76263+#ifdef CONFIG_PAX_SEGMEXEC
76264+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76265+ break;
76266+#endif
76267
76268+ BUG_ON(vma->vm_end > TASK_SIZE);
76269 newflags = vma->vm_flags | VM_LOCKED;
76270 if (!(flags & MCL_CURRENT))
76271 newflags &= ~VM_LOCKED;
76272@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
76273 lock_limit >>= PAGE_SHIFT;
76274
76275 ret = -ENOMEM;
76276+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
76277 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
76278 capable(CAP_IPC_LOCK))
76279 ret = do_mlockall(flags);
76280diff --git a/mm/mmap.c b/mm/mmap.c
76281index 4b80cbf..c5ce1df 100644
76282--- a/mm/mmap.c
76283+++ b/mm/mmap.c
76284@@ -45,6 +45,16 @@
76285 #define arch_rebalance_pgtables(addr, len) (addr)
76286 #endif
76287
76288+static inline void verify_mm_writelocked(struct mm_struct *mm)
76289+{
76290+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
76291+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
76292+ up_read(&mm->mmap_sem);
76293+ BUG();
76294+ }
76295+#endif
76296+}
76297+
76298 static void unmap_region(struct mm_struct *mm,
76299 struct vm_area_struct *vma, struct vm_area_struct *prev,
76300 unsigned long start, unsigned long end);
76301@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
76302 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
76303 *
76304 */
76305-pgprot_t protection_map[16] = {
76306+pgprot_t protection_map[16] __read_only = {
76307 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
76308 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
76309 };
76310
76311 pgprot_t vm_get_page_prot(unsigned long vm_flags)
76312 {
76313- return __pgprot(pgprot_val(protection_map[vm_flags &
76314+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
76315 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
76316 pgprot_val(arch_vm_get_page_prot(vm_flags)));
76317+
76318+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76319+ if (!nx_enabled &&
76320+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
76321+ (vm_flags & (VM_READ | VM_WRITE)))
76322+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
76323+#endif
76324+
76325+ return prot;
76326 }
76327 EXPORT_SYMBOL(vm_get_page_prot);
76328
76329 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
76330 int sysctl_overcommit_ratio = 50; /* default is 50% */
76331 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
76332+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
76333 struct percpu_counter vm_committed_as;
76334
76335 /*
76336@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
76337 struct vm_area_struct *next = vma->vm_next;
76338
76339 might_sleep();
76340+ BUG_ON(vma->vm_mirror);
76341 if (vma->vm_ops && vma->vm_ops->close)
76342 vma->vm_ops->close(vma);
76343 if (vma->vm_file) {
76344@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
76345 * not page aligned -Ram Gupta
76346 */
76347 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
76348+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
76349 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
76350 (mm->end_data - mm->start_data) > rlim)
76351 goto out;
76352@@ -704,6 +726,12 @@ static int
76353 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
76354 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76355 {
76356+
76357+#ifdef CONFIG_PAX_SEGMEXEC
76358+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
76359+ return 0;
76360+#endif
76361+
76362 if (is_mergeable_vma(vma, file, vm_flags) &&
76363 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
76364 if (vma->vm_pgoff == vm_pgoff)
76365@@ -723,6 +751,12 @@ static int
76366 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76367 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76368 {
76369+
76370+#ifdef CONFIG_PAX_SEGMEXEC
76371+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
76372+ return 0;
76373+#endif
76374+
76375 if (is_mergeable_vma(vma, file, vm_flags) &&
76376 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
76377 pgoff_t vm_pglen;
76378@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76379 struct vm_area_struct *vma_merge(struct mm_struct *mm,
76380 struct vm_area_struct *prev, unsigned long addr,
76381 unsigned long end, unsigned long vm_flags,
76382- struct anon_vma *anon_vma, struct file *file,
76383+ struct anon_vma *anon_vma, struct file *file,
76384 pgoff_t pgoff, struct mempolicy *policy)
76385 {
76386 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
76387 struct vm_area_struct *area, *next;
76388
76389+#ifdef CONFIG_PAX_SEGMEXEC
76390+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
76391+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
76392+
76393+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
76394+#endif
76395+
76396 /*
76397 * We later require that vma->vm_flags == vm_flags,
76398 * so this tests vma->vm_flags & VM_SPECIAL, too.
76399@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76400 if (next && next->vm_end == end) /* cases 6, 7, 8 */
76401 next = next->vm_next;
76402
76403+#ifdef CONFIG_PAX_SEGMEXEC
76404+ if (prev)
76405+ prev_m = pax_find_mirror_vma(prev);
76406+ if (area)
76407+ area_m = pax_find_mirror_vma(area);
76408+ if (next)
76409+ next_m = pax_find_mirror_vma(next);
76410+#endif
76411+
76412 /*
76413 * Can it merge with the predecessor?
76414 */
76415@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76416 /* cases 1, 6 */
76417 vma_adjust(prev, prev->vm_start,
76418 next->vm_end, prev->vm_pgoff, NULL);
76419- } else /* cases 2, 5, 7 */
76420+
76421+#ifdef CONFIG_PAX_SEGMEXEC
76422+ if (prev_m)
76423+ vma_adjust(prev_m, prev_m->vm_start,
76424+ next_m->vm_end, prev_m->vm_pgoff, NULL);
76425+#endif
76426+
76427+ } else { /* cases 2, 5, 7 */
76428 vma_adjust(prev, prev->vm_start,
76429 end, prev->vm_pgoff, NULL);
76430+
76431+#ifdef CONFIG_PAX_SEGMEXEC
76432+ if (prev_m)
76433+ vma_adjust(prev_m, prev_m->vm_start,
76434+ end_m, prev_m->vm_pgoff, NULL);
76435+#endif
76436+
76437+ }
76438 return prev;
76439 }
76440
76441@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76442 mpol_equal(policy, vma_policy(next)) &&
76443 can_vma_merge_before(next, vm_flags,
76444 anon_vma, file, pgoff+pglen)) {
76445- if (prev && addr < prev->vm_end) /* case 4 */
76446+ if (prev && addr < prev->vm_end) { /* case 4 */
76447 vma_adjust(prev, prev->vm_start,
76448 addr, prev->vm_pgoff, NULL);
76449- else /* cases 3, 8 */
76450+
76451+#ifdef CONFIG_PAX_SEGMEXEC
76452+ if (prev_m)
76453+ vma_adjust(prev_m, prev_m->vm_start,
76454+ addr_m, prev_m->vm_pgoff, NULL);
76455+#endif
76456+
76457+ } else { /* cases 3, 8 */
76458 vma_adjust(area, addr, next->vm_end,
76459 next->vm_pgoff - pglen, NULL);
76460+
76461+#ifdef CONFIG_PAX_SEGMEXEC
76462+ if (area_m)
76463+ vma_adjust(area_m, addr_m, next_m->vm_end,
76464+ next_m->vm_pgoff - pglen, NULL);
76465+#endif
76466+
76467+ }
76468 return area;
76469 }
76470
76471@@ -898,14 +978,11 @@ none:
76472 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
76473 struct file *file, long pages)
76474 {
76475- const unsigned long stack_flags
76476- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
76477-
76478 if (file) {
76479 mm->shared_vm += pages;
76480 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
76481 mm->exec_vm += pages;
76482- } else if (flags & stack_flags)
76483+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
76484 mm->stack_vm += pages;
76485 if (flags & (VM_RESERVED|VM_IO))
76486 mm->reserved_vm += pages;
76487@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76488 * (the exception is when the underlying filesystem is noexec
76489 * mounted, in which case we dont add PROT_EXEC.)
76490 */
76491- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
76492+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
76493 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
76494 prot |= PROT_EXEC;
76495
76496@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76497 /* Obtain the address to map to. we verify (or select) it and ensure
76498 * that it represents a valid section of the address space.
76499 */
76500- addr = get_unmapped_area(file, addr, len, pgoff, flags);
76501+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
76502 if (addr & ~PAGE_MASK)
76503 return addr;
76504
76505@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76506 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
76507 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
76508
76509+#ifdef CONFIG_PAX_MPROTECT
76510+ if (mm->pax_flags & MF_PAX_MPROTECT) {
76511+#ifndef CONFIG_PAX_MPROTECT_COMPAT
76512+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
76513+ gr_log_rwxmmap(file);
76514+
76515+#ifdef CONFIG_PAX_EMUPLT
76516+ vm_flags &= ~VM_EXEC;
76517+#else
76518+ return -EPERM;
76519+#endif
76520+
76521+ }
76522+
76523+ if (!(vm_flags & VM_EXEC))
76524+ vm_flags &= ~VM_MAYEXEC;
76525+#else
76526+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
76527+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
76528+#endif
76529+ else
76530+ vm_flags &= ~VM_MAYWRITE;
76531+ }
76532+#endif
76533+
76534+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76535+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
76536+ vm_flags &= ~VM_PAGEEXEC;
76537+#endif
76538+
76539 if (flags & MAP_LOCKED)
76540 if (!can_do_mlock())
76541 return -EPERM;
76542@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76543 locked += mm->locked_vm;
76544 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
76545 lock_limit >>= PAGE_SHIFT;
76546+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
76547 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
76548 return -EAGAIN;
76549 }
76550@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
76551 if (error)
76552 return error;
76553
76554+ if (!gr_acl_handle_mmap(file, prot))
76555+ return -EACCES;
76556+
76557 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
76558 }
76559 EXPORT_SYMBOL(do_mmap_pgoff);
76560@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
76561 */
76562 int vma_wants_writenotify(struct vm_area_struct *vma)
76563 {
76564- unsigned int vm_flags = vma->vm_flags;
76565+ unsigned long vm_flags = vma->vm_flags;
76566
76567 /* If it was private or non-writable, the write bit is already clear */
76568- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
76569+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
76570 return 0;
76571
76572 /* The backer wishes to know when pages are first written to? */
76573@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
76574 unsigned long charged = 0;
76575 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
76576
76577+#ifdef CONFIG_PAX_SEGMEXEC
76578+ struct vm_area_struct *vma_m = NULL;
76579+#endif
76580+
76581+ /*
76582+ * mm->mmap_sem is required to protect against another thread
76583+ * changing the mappings in case we sleep.
76584+ */
76585+ verify_mm_writelocked(mm);
76586+
76587 /* Clear old maps */
76588 error = -ENOMEM;
76589-munmap_back:
76590 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
76591 if (vma && vma->vm_start < addr + len) {
76592 if (do_munmap(mm, addr, len))
76593 return -ENOMEM;
76594- goto munmap_back;
76595+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
76596+ BUG_ON(vma && vma->vm_start < addr + len);
76597 }
76598
76599 /* Check against address space limit. */
76600@@ -1173,6 +1294,16 @@ munmap_back:
76601 goto unacct_error;
76602 }
76603
76604+#ifdef CONFIG_PAX_SEGMEXEC
76605+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
76606+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76607+ if (!vma_m) {
76608+ error = -ENOMEM;
76609+ goto free_vma;
76610+ }
76611+ }
76612+#endif
76613+
76614 vma->vm_mm = mm;
76615 vma->vm_start = addr;
76616 vma->vm_end = addr + len;
76617@@ -1195,6 +1326,19 @@ munmap_back:
76618 error = file->f_op->mmap(file, vma);
76619 if (error)
76620 goto unmap_and_free_vma;
76621+
76622+#ifdef CONFIG_PAX_SEGMEXEC
76623+ if (vma_m && (vm_flags & VM_EXECUTABLE))
76624+ added_exe_file_vma(mm);
76625+#endif
76626+
76627+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76628+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
76629+ vma->vm_flags |= VM_PAGEEXEC;
76630+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76631+ }
76632+#endif
76633+
76634 if (vm_flags & VM_EXECUTABLE)
76635 added_exe_file_vma(mm);
76636
76637@@ -1218,6 +1362,11 @@ munmap_back:
76638 vma_link(mm, vma, prev, rb_link, rb_parent);
76639 file = vma->vm_file;
76640
76641+#ifdef CONFIG_PAX_SEGMEXEC
76642+ if (vma_m)
76643+ pax_mirror_vma(vma_m, vma);
76644+#endif
76645+
76646 /* Once vma denies write, undo our temporary denial count */
76647 if (correct_wcount)
76648 atomic_inc(&inode->i_writecount);
76649@@ -1226,6 +1375,7 @@ out:
76650
76651 mm->total_vm += len >> PAGE_SHIFT;
76652 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
76653+ track_exec_limit(mm, addr, addr + len, vm_flags);
76654 if (vm_flags & VM_LOCKED) {
76655 /*
76656 * makes pages present; downgrades, drops, reacquires mmap_sem
76657@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
76658 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
76659 charged = 0;
76660 free_vma:
76661+
76662+#ifdef CONFIG_PAX_SEGMEXEC
76663+ if (vma_m)
76664+ kmem_cache_free(vm_area_cachep, vma_m);
76665+#endif
76666+
76667 kmem_cache_free(vm_area_cachep, vma);
76668 unacct_error:
76669 if (charged)
76670@@ -1255,6 +1411,44 @@ unacct_error:
76671 return error;
76672 }
76673
76674+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
76675+{
76676+ if (!vma) {
76677+#ifdef CONFIG_STACK_GROWSUP
76678+ if (addr > sysctl_heap_stack_gap)
76679+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
76680+ else
76681+ vma = find_vma(current->mm, 0);
76682+ if (vma && (vma->vm_flags & VM_GROWSUP))
76683+ return false;
76684+#endif
76685+ return true;
76686+ }
76687+
76688+ if (addr + len > vma->vm_start)
76689+ return false;
76690+
76691+ if (vma->vm_flags & VM_GROWSDOWN)
76692+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
76693+#ifdef CONFIG_STACK_GROWSUP
76694+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
76695+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
76696+#endif
76697+
76698+ return true;
76699+}
76700+
76701+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
76702+{
76703+ if (vma->vm_start < len)
76704+ return -ENOMEM;
76705+ if (!(vma->vm_flags & VM_GROWSDOWN))
76706+ return vma->vm_start - len;
76707+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
76708+ return vma->vm_start - len - sysctl_heap_stack_gap;
76709+ return -ENOMEM;
76710+}
76711+
76712 /* Get an address range which is currently unmapped.
76713 * For shmat() with addr=0.
76714 *
76715@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
76716 if (flags & MAP_FIXED)
76717 return addr;
76718
76719+#ifdef CONFIG_PAX_RANDMMAP
76720+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
76721+#endif
76722+
76723 if (addr) {
76724 addr = PAGE_ALIGN(addr);
76725- vma = find_vma(mm, addr);
76726- if (TASK_SIZE - len >= addr &&
76727- (!vma || addr + len <= vma->vm_start))
76728- return addr;
76729+ if (TASK_SIZE - len >= addr) {
76730+ vma = find_vma(mm, addr);
76731+ if (check_heap_stack_gap(vma, addr, len))
76732+ return addr;
76733+ }
76734 }
76735 if (len > mm->cached_hole_size) {
76736- start_addr = addr = mm->free_area_cache;
76737+ start_addr = addr = mm->free_area_cache;
76738 } else {
76739- start_addr = addr = TASK_UNMAPPED_BASE;
76740- mm->cached_hole_size = 0;
76741+ start_addr = addr = mm->mmap_base;
76742+ mm->cached_hole_size = 0;
76743 }
76744
76745 full_search:
76746@@ -1303,34 +1502,40 @@ full_search:
76747 * Start a new search - just in case we missed
76748 * some holes.
76749 */
76750- if (start_addr != TASK_UNMAPPED_BASE) {
76751- addr = TASK_UNMAPPED_BASE;
76752- start_addr = addr;
76753+ if (start_addr != mm->mmap_base) {
76754+ start_addr = addr = mm->mmap_base;
76755 mm->cached_hole_size = 0;
76756 goto full_search;
76757 }
76758 return -ENOMEM;
76759 }
76760- if (!vma || addr + len <= vma->vm_start) {
76761- /*
76762- * Remember the place where we stopped the search:
76763- */
76764- mm->free_area_cache = addr + len;
76765- return addr;
76766- }
76767+ if (check_heap_stack_gap(vma, addr, len))
76768+ break;
76769 if (addr + mm->cached_hole_size < vma->vm_start)
76770 mm->cached_hole_size = vma->vm_start - addr;
76771 addr = vma->vm_end;
76772 }
76773+
76774+ /*
76775+ * Remember the place where we stopped the search:
76776+ */
76777+ mm->free_area_cache = addr + len;
76778+ return addr;
76779 }
76780 #endif
76781
76782 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
76783 {
76784+
76785+#ifdef CONFIG_PAX_SEGMEXEC
76786+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
76787+ return;
76788+#endif
76789+
76790 /*
76791 * Is this a new hole at the lowest possible address?
76792 */
76793- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
76794+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
76795 mm->free_area_cache = addr;
76796 mm->cached_hole_size = ~0UL;
76797 }
76798@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76799 {
76800 struct vm_area_struct *vma;
76801 struct mm_struct *mm = current->mm;
76802- unsigned long addr = addr0;
76803+ unsigned long base = mm->mmap_base, addr = addr0;
76804
76805 /* requested length too big for entire address space */
76806 if (len > TASK_SIZE)
76807@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76808 if (flags & MAP_FIXED)
76809 return addr;
76810
76811+#ifdef CONFIG_PAX_RANDMMAP
76812+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
76813+#endif
76814+
76815 /* requesting a specific address */
76816 if (addr) {
76817 addr = PAGE_ALIGN(addr);
76818- vma = find_vma(mm, addr);
76819- if (TASK_SIZE - len >= addr &&
76820- (!vma || addr + len <= vma->vm_start))
76821- return addr;
76822+ if (TASK_SIZE - len >= addr) {
76823+ vma = find_vma(mm, addr);
76824+ if (check_heap_stack_gap(vma, addr, len))
76825+ return addr;
76826+ }
76827 }
76828
76829 /* check if free_area_cache is useful for us */
76830@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76831 /* make sure it can fit in the remaining address space */
76832 if (addr > len) {
76833 vma = find_vma(mm, addr-len);
76834- if (!vma || addr <= vma->vm_start)
76835+ if (check_heap_stack_gap(vma, addr - len, len))
76836 /* remember the address as a hint for next time */
76837 return (mm->free_area_cache = addr-len);
76838 }
76839@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76840 * return with success:
76841 */
76842 vma = find_vma(mm, addr);
76843- if (!vma || addr+len <= vma->vm_start)
76844+ if (check_heap_stack_gap(vma, addr, len))
76845 /* remember the address as a hint for next time */
76846 return (mm->free_area_cache = addr);
76847
76848@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
76849 mm->cached_hole_size = vma->vm_start - addr;
76850
76851 /* try just below the current vma->vm_start */
76852- addr = vma->vm_start-len;
76853- } while (len < vma->vm_start);
76854+ addr = skip_heap_stack_gap(vma, len);
76855+ } while (!IS_ERR_VALUE(addr));
76856
76857 bottomup:
76858 /*
76859@@ -1414,13 +1624,21 @@ bottomup:
76860 * can happen with large stack limits and large mmap()
76861 * allocations.
76862 */
76863+ mm->mmap_base = TASK_UNMAPPED_BASE;
76864+
76865+#ifdef CONFIG_PAX_RANDMMAP
76866+ if (mm->pax_flags & MF_PAX_RANDMMAP)
76867+ mm->mmap_base += mm->delta_mmap;
76868+#endif
76869+
76870+ mm->free_area_cache = mm->mmap_base;
76871 mm->cached_hole_size = ~0UL;
76872- mm->free_area_cache = TASK_UNMAPPED_BASE;
76873 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
76874 /*
76875 * Restore the topdown base:
76876 */
76877- mm->free_area_cache = mm->mmap_base;
76878+ mm->mmap_base = base;
76879+ mm->free_area_cache = base;
76880 mm->cached_hole_size = ~0UL;
76881
76882 return addr;
76883@@ -1429,6 +1647,12 @@ bottomup:
76884
76885 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
76886 {
76887+
76888+#ifdef CONFIG_PAX_SEGMEXEC
76889+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
76890+ return;
76891+#endif
76892+
76893 /*
76894 * Is this a new hole at the highest possible address?
76895 */
76896@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
76897 mm->free_area_cache = addr;
76898
76899 /* dont allow allocations above current base */
76900- if (mm->free_area_cache > mm->mmap_base)
76901+ if (mm->free_area_cache > mm->mmap_base) {
76902 mm->free_area_cache = mm->mmap_base;
76903+ mm->cached_hole_size = ~0UL;
76904+ }
76905 }
76906
76907 unsigned long
76908@@ -1545,6 +1771,27 @@ out:
76909 return prev ? prev->vm_next : vma;
76910 }
76911
76912+#ifdef CONFIG_PAX_SEGMEXEC
76913+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
76914+{
76915+ struct vm_area_struct *vma_m;
76916+
76917+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
76918+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
76919+ BUG_ON(vma->vm_mirror);
76920+ return NULL;
76921+ }
76922+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
76923+ vma_m = vma->vm_mirror;
76924+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
76925+ BUG_ON(vma->vm_file != vma_m->vm_file);
76926+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
76927+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
76928+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
76929+ return vma_m;
76930+}
76931+#endif
76932+
76933 /*
76934 * Verify that the stack growth is acceptable and
76935 * update accounting. This is shared with both the
76936@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
76937 return -ENOMEM;
76938
76939 /* Stack limit test */
76940+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
76941 if (size > rlim[RLIMIT_STACK].rlim_cur)
76942 return -ENOMEM;
76943
76944@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
76945 unsigned long limit;
76946 locked = mm->locked_vm + grow;
76947 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
76948+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
76949 if (locked > limit && !capable(CAP_IPC_LOCK))
76950 return -ENOMEM;
76951 }
76952@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
76953 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
76954 * vma is the last one with address > vma->vm_end. Have to extend vma.
76955 */
76956+#ifndef CONFIG_IA64
76957+static
76958+#endif
76959 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
76960 {
76961 int error;
76962+ bool locknext;
76963
76964 if (!(vma->vm_flags & VM_GROWSUP))
76965 return -EFAULT;
76966
76967+ /* Also guard against wrapping around to address 0. */
76968+ if (address < PAGE_ALIGN(address+1))
76969+ address = PAGE_ALIGN(address+1);
76970+ else
76971+ return -ENOMEM;
76972+
76973 /*
76974 * We must make sure the anon_vma is allocated
76975 * so that the anon_vma locking is not a noop.
76976 */
76977 if (unlikely(anon_vma_prepare(vma)))
76978 return -ENOMEM;
76979+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
76980+ if (locknext && anon_vma_prepare(vma->vm_next))
76981+ return -ENOMEM;
76982 anon_vma_lock(vma);
76983+ if (locknext)
76984+ anon_vma_lock(vma->vm_next);
76985
76986 /*
76987 * vma->vm_start/vm_end cannot change under us because the caller
76988 * is required to hold the mmap_sem in read mode. We need the
76989- * anon_vma lock to serialize against concurrent expand_stacks.
76990- * Also guard against wrapping around to address 0.
76991+ * anon_vma locks to serialize against concurrent expand_stacks
76992+ * and expand_upwards.
76993 */
76994- if (address < PAGE_ALIGN(address+4))
76995- address = PAGE_ALIGN(address+4);
76996- else {
76997- anon_vma_unlock(vma);
76998- return -ENOMEM;
76999- }
77000 error = 0;
77001
77002 /* Somebody else might have raced and expanded it already */
77003- if (address > vma->vm_end) {
77004+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
77005+ error = -ENOMEM;
77006+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
77007 unsigned long size, grow;
77008
77009 size = address - vma->vm_start;
77010@@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77011 vma->vm_end = address;
77012 }
77013 }
77014+ if (locknext)
77015+ anon_vma_unlock(vma->vm_next);
77016 anon_vma_unlock(vma);
77017 return error;
77018 }
77019@@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
77020 unsigned long address)
77021 {
77022 int error;
77023+ bool lockprev = false;
77024+ struct vm_area_struct *prev;
77025
77026 /*
77027 * We must make sure the anon_vma is allocated
77028@@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
77029 if (error)
77030 return error;
77031
77032+ prev = vma->vm_prev;
77033+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
77034+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
77035+#endif
77036+ if (lockprev && anon_vma_prepare(prev))
77037+ return -ENOMEM;
77038+ if (lockprev)
77039+ anon_vma_lock(prev);
77040+
77041 anon_vma_lock(vma);
77042
77043 /*
77044@@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
77045 */
77046
77047 /* Somebody else might have raced and expanded it already */
77048- if (address < vma->vm_start) {
77049+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
77050+ error = -ENOMEM;
77051+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
77052 unsigned long size, grow;
77053
77054+#ifdef CONFIG_PAX_SEGMEXEC
77055+ struct vm_area_struct *vma_m;
77056+
77057+ vma_m = pax_find_mirror_vma(vma);
77058+#endif
77059+
77060 size = vma->vm_end - address;
77061 grow = (vma->vm_start - address) >> PAGE_SHIFT;
77062
77063@@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
77064 if (!error) {
77065 vma->vm_start = address;
77066 vma->vm_pgoff -= grow;
77067+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
77068+
77069+#ifdef CONFIG_PAX_SEGMEXEC
77070+ if (vma_m) {
77071+ vma_m->vm_start -= grow << PAGE_SHIFT;
77072+ vma_m->vm_pgoff -= grow;
77073+ }
77074+#endif
77075+
77076+
77077 }
77078 }
77079 }
77080 anon_vma_unlock(vma);
77081+ if (lockprev)
77082+ anon_vma_unlock(prev);
77083 return error;
77084 }
77085
77086@@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
77087 do {
77088 long nrpages = vma_pages(vma);
77089
77090+#ifdef CONFIG_PAX_SEGMEXEC
77091+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
77092+ vma = remove_vma(vma);
77093+ continue;
77094+ }
77095+#endif
77096+
77097 mm->total_vm -= nrpages;
77098 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
77099 vma = remove_vma(vma);
77100@@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
77101 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
77102 vma->vm_prev = NULL;
77103 do {
77104+
77105+#ifdef CONFIG_PAX_SEGMEXEC
77106+ if (vma->vm_mirror) {
77107+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
77108+ vma->vm_mirror->vm_mirror = NULL;
77109+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
77110+ vma->vm_mirror = NULL;
77111+ }
77112+#endif
77113+
77114 rb_erase(&vma->vm_rb, &mm->mm_rb);
77115 mm->map_count--;
77116 tail_vma = vma;
77117@@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77118 struct mempolicy *pol;
77119 struct vm_area_struct *new;
77120
77121+#ifdef CONFIG_PAX_SEGMEXEC
77122+ struct vm_area_struct *vma_m, *new_m = NULL;
77123+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
77124+#endif
77125+
77126 if (is_vm_hugetlb_page(vma) && (addr &
77127 ~(huge_page_mask(hstate_vma(vma)))))
77128 return -EINVAL;
77129
77130+#ifdef CONFIG_PAX_SEGMEXEC
77131+ vma_m = pax_find_mirror_vma(vma);
77132+
77133+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77134+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
77135+ if (mm->map_count >= sysctl_max_map_count-1)
77136+ return -ENOMEM;
77137+ } else
77138+#endif
77139+
77140 if (mm->map_count >= sysctl_max_map_count)
77141 return -ENOMEM;
77142
77143@@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77144 if (!new)
77145 return -ENOMEM;
77146
77147+#ifdef CONFIG_PAX_SEGMEXEC
77148+ if (vma_m) {
77149+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
77150+ if (!new_m) {
77151+ kmem_cache_free(vm_area_cachep, new);
77152+ return -ENOMEM;
77153+ }
77154+ }
77155+#endif
77156+
77157 /* most fields are the same, copy all, and then fixup */
77158 *new = *vma;
77159
77160@@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77161 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
77162 }
77163
77164+#ifdef CONFIG_PAX_SEGMEXEC
77165+ if (vma_m) {
77166+ *new_m = *vma_m;
77167+ new_m->vm_mirror = new;
77168+ new->vm_mirror = new_m;
77169+
77170+ if (new_below)
77171+ new_m->vm_end = addr_m;
77172+ else {
77173+ new_m->vm_start = addr_m;
77174+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
77175+ }
77176+ }
77177+#endif
77178+
77179 pol = mpol_dup(vma_policy(vma));
77180 if (IS_ERR(pol)) {
77181+
77182+#ifdef CONFIG_PAX_SEGMEXEC
77183+ if (new_m)
77184+ kmem_cache_free(vm_area_cachep, new_m);
77185+#endif
77186+
77187 kmem_cache_free(vm_area_cachep, new);
77188 return PTR_ERR(pol);
77189 }
77190@@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77191 else
77192 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
77193
77194+#ifdef CONFIG_PAX_SEGMEXEC
77195+ if (vma_m) {
77196+ mpol_get(pol);
77197+ vma_set_policy(new_m, pol);
77198+
77199+ if (new_m->vm_file) {
77200+ get_file(new_m->vm_file);
77201+ if (vma_m->vm_flags & VM_EXECUTABLE)
77202+ added_exe_file_vma(mm);
77203+ }
77204+
77205+ if (new_m->vm_ops && new_m->vm_ops->open)
77206+ new_m->vm_ops->open(new_m);
77207+
77208+ if (new_below)
77209+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
77210+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
77211+ else
77212+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
77213+ }
77214+#endif
77215+
77216 return 0;
77217 }
77218
77219@@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77220 * work. This now handles partial unmappings.
77221 * Jeremy Fitzhardinge <jeremy@goop.org>
77222 */
77223+#ifdef CONFIG_PAX_SEGMEXEC
77224+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77225+{
77226+ int ret = __do_munmap(mm, start, len);
77227+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
77228+ return ret;
77229+
77230+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
77231+}
77232+
77233+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77234+#else
77235 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77236+#endif
77237 {
77238 unsigned long end;
77239 struct vm_area_struct *vma, *prev, *last;
77240
77241+ /*
77242+ * mm->mmap_sem is required to protect against another thread
77243+ * changing the mappings in case we sleep.
77244+ */
77245+ verify_mm_writelocked(mm);
77246+
77247 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
77248 return -EINVAL;
77249
77250@@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77251 /* Fix up all other VM information */
77252 remove_vma_list(mm, vma);
77253
77254+ track_exec_limit(mm, start, end, 0UL);
77255+
77256 return 0;
77257 }
77258
77259@@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
77260
77261 profile_munmap(addr);
77262
77263+#ifdef CONFIG_PAX_SEGMEXEC
77264+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
77265+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
77266+ return -EINVAL;
77267+#endif
77268+
77269 down_write(&mm->mmap_sem);
77270 ret = do_munmap(mm, addr, len);
77271 up_write(&mm->mmap_sem);
77272 return ret;
77273 }
77274
77275-static inline void verify_mm_writelocked(struct mm_struct *mm)
77276-{
77277-#ifdef CONFIG_DEBUG_VM
77278- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77279- WARN_ON(1);
77280- up_read(&mm->mmap_sem);
77281- }
77282-#endif
77283-}
77284-
77285 /*
77286 * this is really a simplified "do_mmap". it only handles
77287 * anonymous maps. eventually we may be able to do some
77288@@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77289 struct rb_node ** rb_link, * rb_parent;
77290 pgoff_t pgoff = addr >> PAGE_SHIFT;
77291 int error;
77292+ unsigned long charged;
77293
77294 len = PAGE_ALIGN(len);
77295 if (!len)
77296@@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77297
77298 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
77299
77300+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
77301+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
77302+ flags &= ~VM_EXEC;
77303+
77304+#ifdef CONFIG_PAX_MPROTECT
77305+ if (mm->pax_flags & MF_PAX_MPROTECT)
77306+ flags &= ~VM_MAYEXEC;
77307+#endif
77308+
77309+ }
77310+#endif
77311+
77312 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
77313 if (error & ~PAGE_MASK)
77314 return error;
77315
77316+ charged = len >> PAGE_SHIFT;
77317+
77318 /*
77319 * mlock MCL_FUTURE?
77320 */
77321 if (mm->def_flags & VM_LOCKED) {
77322 unsigned long locked, lock_limit;
77323- locked = len >> PAGE_SHIFT;
77324+ locked = charged;
77325 locked += mm->locked_vm;
77326 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77327 lock_limit >>= PAGE_SHIFT;
77328@@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77329 /*
77330 * Clear old maps. this also does some error checking for us
77331 */
77332- munmap_back:
77333 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77334 if (vma && vma->vm_start < addr + len) {
77335 if (do_munmap(mm, addr, len))
77336 return -ENOMEM;
77337- goto munmap_back;
77338+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77339+ BUG_ON(vma && vma->vm_start < addr + len);
77340 }
77341
77342 /* Check against address space limits *after* clearing old maps... */
77343- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
77344+ if (!may_expand_vm(mm, charged))
77345 return -ENOMEM;
77346
77347 if (mm->map_count > sysctl_max_map_count)
77348 return -ENOMEM;
77349
77350- if (security_vm_enough_memory(len >> PAGE_SHIFT))
77351+ if (security_vm_enough_memory(charged))
77352 return -ENOMEM;
77353
77354 /* Can we just expand an old private anonymous mapping? */
77355@@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77356 */
77357 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77358 if (!vma) {
77359- vm_unacct_memory(len >> PAGE_SHIFT);
77360+ vm_unacct_memory(charged);
77361 return -ENOMEM;
77362 }
77363
77364@@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77365 vma->vm_page_prot = vm_get_page_prot(flags);
77366 vma_link(mm, vma, prev, rb_link, rb_parent);
77367 out:
77368- mm->total_vm += len >> PAGE_SHIFT;
77369+ mm->total_vm += charged;
77370 if (flags & VM_LOCKED) {
77371 if (!mlock_vma_pages_range(vma, addr, addr + len))
77372- mm->locked_vm += (len >> PAGE_SHIFT);
77373+ mm->locked_vm += charged;
77374 }
77375+ track_exec_limit(mm, addr, addr + len, flags);
77376 return addr;
77377 }
77378
77379@@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
77380 * Walk the list again, actually closing and freeing it,
77381 * with preemption enabled, without holding any MM locks.
77382 */
77383- while (vma)
77384+ while (vma) {
77385+ vma->vm_mirror = NULL;
77386 vma = remove_vma(vma);
77387+ }
77388
77389 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
77390 }
77391@@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
77392 struct vm_area_struct * __vma, * prev;
77393 struct rb_node ** rb_link, * rb_parent;
77394
77395+#ifdef CONFIG_PAX_SEGMEXEC
77396+ struct vm_area_struct *vma_m = NULL;
77397+#endif
77398+
77399 /*
77400 * The vm_pgoff of a purely anonymous vma should be irrelevant
77401 * until its first write fault, when page's anon_vma and index
77402@@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
77403 if ((vma->vm_flags & VM_ACCOUNT) &&
77404 security_vm_enough_memory_mm(mm, vma_pages(vma)))
77405 return -ENOMEM;
77406+
77407+#ifdef CONFIG_PAX_SEGMEXEC
77408+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
77409+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77410+ if (!vma_m)
77411+ return -ENOMEM;
77412+ }
77413+#endif
77414+
77415 vma_link(mm, vma, prev, rb_link, rb_parent);
77416+
77417+#ifdef CONFIG_PAX_SEGMEXEC
77418+ if (vma_m)
77419+ pax_mirror_vma(vma_m, vma);
77420+#endif
77421+
77422 return 0;
77423 }
77424
77425@@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77426 struct rb_node **rb_link, *rb_parent;
77427 struct mempolicy *pol;
77428
77429+ BUG_ON(vma->vm_mirror);
77430+
77431 /*
77432 * If anonymous vma has not yet been faulted, update new pgoff
77433 * to match new location, to increase its chance of merging.
77434@@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77435 return new_vma;
77436 }
77437
77438+#ifdef CONFIG_PAX_SEGMEXEC
77439+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
77440+{
77441+ struct vm_area_struct *prev_m;
77442+ struct rb_node **rb_link_m, *rb_parent_m;
77443+ struct mempolicy *pol_m;
77444+
77445+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
77446+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
77447+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
77448+ *vma_m = *vma;
77449+ pol_m = vma_policy(vma_m);
77450+ mpol_get(pol_m);
77451+ vma_set_policy(vma_m, pol_m);
77452+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
77453+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
77454+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
77455+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
77456+ if (vma_m->vm_file)
77457+ get_file(vma_m->vm_file);
77458+ if (vma_m->vm_ops && vma_m->vm_ops->open)
77459+ vma_m->vm_ops->open(vma_m);
77460+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
77461+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
77462+ vma_m->vm_mirror = vma;
77463+ vma->vm_mirror = vma_m;
77464+}
77465+#endif
77466+
77467 /*
77468 * Return true if the calling process may expand its vm space by the passed
77469 * number of pages
77470@@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
77471 unsigned long lim;
77472
77473 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
77474-
77475+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
77476 if (cur + npages > lim)
77477 return 0;
77478 return 1;
77479@@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
77480 vma->vm_start = addr;
77481 vma->vm_end = addr + len;
77482
77483+#ifdef CONFIG_PAX_MPROTECT
77484+ if (mm->pax_flags & MF_PAX_MPROTECT) {
77485+#ifndef CONFIG_PAX_MPROTECT_COMPAT
77486+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
77487+ return -EPERM;
77488+ if (!(vm_flags & VM_EXEC))
77489+ vm_flags &= ~VM_MAYEXEC;
77490+#else
77491+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77492+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77493+#endif
77494+ else
77495+ vm_flags &= ~VM_MAYWRITE;
77496+ }
77497+#endif
77498+
77499 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
77500 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77501
77502diff --git a/mm/mprotect.c b/mm/mprotect.c
77503index 1737c7e..c7faeb4 100644
77504--- a/mm/mprotect.c
77505+++ b/mm/mprotect.c
77506@@ -24,10 +24,16 @@
77507 #include <linux/mmu_notifier.h>
77508 #include <linux/migrate.h>
77509 #include <linux/perf_event.h>
77510+
77511+#ifdef CONFIG_PAX_MPROTECT
77512+#include <linux/elf.h>
77513+#endif
77514+
77515 #include <asm/uaccess.h>
77516 #include <asm/pgtable.h>
77517 #include <asm/cacheflush.h>
77518 #include <asm/tlbflush.h>
77519+#include <asm/mmu_context.h>
77520
77521 #ifndef pgprot_modify
77522 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
77523@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
77524 flush_tlb_range(vma, start, end);
77525 }
77526
77527+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77528+/* called while holding the mmap semaphor for writing except stack expansion */
77529+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
77530+{
77531+ unsigned long oldlimit, newlimit = 0UL;
77532+
77533+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
77534+ return;
77535+
77536+ spin_lock(&mm->page_table_lock);
77537+ oldlimit = mm->context.user_cs_limit;
77538+ if ((prot & VM_EXEC) && oldlimit < end)
77539+ /* USER_CS limit moved up */
77540+ newlimit = end;
77541+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
77542+ /* USER_CS limit moved down */
77543+ newlimit = start;
77544+
77545+ if (newlimit) {
77546+ mm->context.user_cs_limit = newlimit;
77547+
77548+#ifdef CONFIG_SMP
77549+ wmb();
77550+ cpus_clear(mm->context.cpu_user_cs_mask);
77551+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
77552+#endif
77553+
77554+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
77555+ }
77556+ spin_unlock(&mm->page_table_lock);
77557+ if (newlimit == end) {
77558+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
77559+
77560+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
77561+ if (is_vm_hugetlb_page(vma))
77562+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
77563+ else
77564+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
77565+ }
77566+}
77567+#endif
77568+
77569 int
77570 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77571 unsigned long start, unsigned long end, unsigned long newflags)
77572@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77573 int error;
77574 int dirty_accountable = 0;
77575
77576+#ifdef CONFIG_PAX_SEGMEXEC
77577+ struct vm_area_struct *vma_m = NULL;
77578+ unsigned long start_m, end_m;
77579+
77580+ start_m = start + SEGMEXEC_TASK_SIZE;
77581+ end_m = end + SEGMEXEC_TASK_SIZE;
77582+#endif
77583+
77584 if (newflags == oldflags) {
77585 *pprev = vma;
77586 return 0;
77587 }
77588
77589+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
77590+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
77591+
77592+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
77593+ return -ENOMEM;
77594+
77595+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
77596+ return -ENOMEM;
77597+ }
77598+
77599 /*
77600 * If we make a private mapping writable we increase our commit;
77601 * but (without finer accounting) cannot reduce our commit if we
77602@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
77603 }
77604 }
77605
77606+#ifdef CONFIG_PAX_SEGMEXEC
77607+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
77608+ if (start != vma->vm_start) {
77609+ error = split_vma(mm, vma, start, 1);
77610+ if (error)
77611+ goto fail;
77612+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
77613+ *pprev = (*pprev)->vm_next;
77614+ }
77615+
77616+ if (end != vma->vm_end) {
77617+ error = split_vma(mm, vma, end, 0);
77618+ if (error)
77619+ goto fail;
77620+ }
77621+
77622+ if (pax_find_mirror_vma(vma)) {
77623+ error = __do_munmap(mm, start_m, end_m - start_m);
77624+ if (error)
77625+ goto fail;
77626+ } else {
77627+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77628+ if (!vma_m) {
77629+ error = -ENOMEM;
77630+ goto fail;
77631+ }
77632+ vma->vm_flags = newflags;
77633+ pax_mirror_vma(vma_m, vma);
77634+ }
77635+ }
77636+#endif
77637+
77638 /*
77639 * First try to merge with previous and/or next vma.
77640 */
77641@@ -195,9 +293,21 @@ success:
77642 * vm_flags and vm_page_prot are protected by the mmap_sem
77643 * held in write mode.
77644 */
77645+
77646+#ifdef CONFIG_PAX_SEGMEXEC
77647+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
77648+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
77649+#endif
77650+
77651 vma->vm_flags = newflags;
77652+
77653+#ifdef CONFIG_PAX_MPROTECT
77654+ if (mm->binfmt && mm->binfmt->handle_mprotect)
77655+ mm->binfmt->handle_mprotect(vma, newflags);
77656+#endif
77657+
77658 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
77659- vm_get_page_prot(newflags));
77660+ vm_get_page_prot(vma->vm_flags));
77661
77662 if (vma_wants_writenotify(vma)) {
77663 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
77664@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77665 end = start + len;
77666 if (end <= start)
77667 return -ENOMEM;
77668+
77669+#ifdef CONFIG_PAX_SEGMEXEC
77670+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77671+ if (end > SEGMEXEC_TASK_SIZE)
77672+ return -EINVAL;
77673+ } else
77674+#endif
77675+
77676+ if (end > TASK_SIZE)
77677+ return -EINVAL;
77678+
77679 if (!arch_validate_prot(prot))
77680 return -EINVAL;
77681
77682@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77683 /*
77684 * Does the application expect PROT_READ to imply PROT_EXEC:
77685 */
77686- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77687+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77688 prot |= PROT_EXEC;
77689
77690 vm_flags = calc_vm_prot_bits(prot);
77691@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77692 if (start > vma->vm_start)
77693 prev = vma;
77694
77695+#ifdef CONFIG_PAX_MPROTECT
77696+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
77697+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
77698+#endif
77699+
77700 for (nstart = start ; ; ) {
77701 unsigned long newflags;
77702
77703@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77704
77705 /* newflags >> 4 shift VM_MAY% in place of VM_% */
77706 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
77707+ if (prot & (PROT_WRITE | PROT_EXEC))
77708+ gr_log_rwxmprotect(vma->vm_file);
77709+
77710+ error = -EACCES;
77711+ goto out;
77712+ }
77713+
77714+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
77715 error = -EACCES;
77716 goto out;
77717 }
77718@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
77719 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
77720 if (error)
77721 goto out;
77722+
77723+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
77724+
77725 nstart = tmp;
77726
77727 if (nstart < prev->vm_end)
77728diff --git a/mm/mremap.c b/mm/mremap.c
77729index 3e98d79..1706cec 100644
77730--- a/mm/mremap.c
77731+++ b/mm/mremap.c
77732@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
77733 continue;
77734 pte = ptep_clear_flush(vma, old_addr, old_pte);
77735 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
77736+
77737+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77738+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
77739+ pte = pte_exprotect(pte);
77740+#endif
77741+
77742 set_pte_at(mm, new_addr, new_pte, pte);
77743 }
77744
77745@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
77746 if (is_vm_hugetlb_page(vma))
77747 goto Einval;
77748
77749+#ifdef CONFIG_PAX_SEGMEXEC
77750+ if (pax_find_mirror_vma(vma))
77751+ goto Einval;
77752+#endif
77753+
77754 /* We can't remap across vm area boundaries */
77755 if (old_len > vma->vm_end - addr)
77756 goto Efault;
77757@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
77758 unsigned long ret = -EINVAL;
77759 unsigned long charged = 0;
77760 unsigned long map_flags;
77761+ unsigned long pax_task_size = TASK_SIZE;
77762
77763 if (new_addr & ~PAGE_MASK)
77764 goto out;
77765
77766- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
77767+#ifdef CONFIG_PAX_SEGMEXEC
77768+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
77769+ pax_task_size = SEGMEXEC_TASK_SIZE;
77770+#endif
77771+
77772+ pax_task_size -= PAGE_SIZE;
77773+
77774+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
77775 goto out;
77776
77777 /* Check if the location we're moving into overlaps the
77778 * old location at all, and fail if it does.
77779 */
77780- if ((new_addr <= addr) && (new_addr+new_len) > addr)
77781- goto out;
77782-
77783- if ((addr <= new_addr) && (addr+old_len) > new_addr)
77784+ if (addr + old_len > new_addr && new_addr + new_len > addr)
77785 goto out;
77786
77787 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
77788@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
77789 struct vm_area_struct *vma;
77790 unsigned long ret = -EINVAL;
77791 unsigned long charged = 0;
77792+ unsigned long pax_task_size = TASK_SIZE;
77793
77794 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
77795 goto out;
77796@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
77797 if (!new_len)
77798 goto out;
77799
77800+#ifdef CONFIG_PAX_SEGMEXEC
77801+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
77802+ pax_task_size = SEGMEXEC_TASK_SIZE;
77803+#endif
77804+
77805+ pax_task_size -= PAGE_SIZE;
77806+
77807+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
77808+ old_len > pax_task_size || addr > pax_task_size-old_len)
77809+ goto out;
77810+
77811 if (flags & MREMAP_FIXED) {
77812 if (flags & MREMAP_MAYMOVE)
77813 ret = mremap_to(addr, old_len, new_addr, new_len);
77814@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
77815 addr + new_len);
77816 }
77817 ret = addr;
77818+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
77819 goto out;
77820 }
77821 }
77822@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
77823 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
77824 if (ret)
77825 goto out;
77826+
77827+ map_flags = vma->vm_flags;
77828 ret = move_vma(vma, addr, old_len, new_len, new_addr);
77829+ if (!(ret & ~PAGE_MASK)) {
77830+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
77831+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
77832+ }
77833 }
77834 out:
77835 if (ret & ~PAGE_MASK)
77836diff --git a/mm/nommu.c b/mm/nommu.c
77837index 406e8d4..53970d3 100644
77838--- a/mm/nommu.c
77839+++ b/mm/nommu.c
77840@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
77841 int sysctl_overcommit_ratio = 50; /* default is 50% */
77842 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
77843 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
77844-int heap_stack_gap = 0;
77845
77846 atomic_long_t mmap_pages_allocated;
77847
77848@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
77849 EXPORT_SYMBOL(find_vma);
77850
77851 /*
77852- * find a VMA
77853- * - we don't extend stack VMAs under NOMMU conditions
77854- */
77855-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
77856-{
77857- return find_vma(mm, addr);
77858-}
77859-
77860-/*
77861 * expand a stack to a given address
77862 * - not supported under NOMMU conditions
77863 */
77864diff --git a/mm/page_alloc.c b/mm/page_alloc.c
77865index 3ecab7e..594a471 100644
77866--- a/mm/page_alloc.c
77867+++ b/mm/page_alloc.c
77868@@ -289,7 +289,7 @@ out:
77869 * This usage means that zero-order pages may not be compound.
77870 */
77871
77872-static void free_compound_page(struct page *page)
77873+void free_compound_page(struct page *page)
77874 {
77875 __free_pages_ok(page, compound_order(page));
77876 }
77877@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
77878 int bad = 0;
77879 int wasMlocked = __TestClearPageMlocked(page);
77880
77881+#ifdef CONFIG_PAX_MEMORY_SANITIZE
77882+ unsigned long index = 1UL << order;
77883+#endif
77884+
77885 kmemcheck_free_shadow(page, order);
77886
77887 for (i = 0 ; i < (1 << order) ; ++i)
77888@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
77889 debug_check_no_obj_freed(page_address(page),
77890 PAGE_SIZE << order);
77891 }
77892+
77893+#ifdef CONFIG_PAX_MEMORY_SANITIZE
77894+ for (; index; --index)
77895+ sanitize_highpage(page + index - 1);
77896+#endif
77897+
77898 arch_free_page(page, order);
77899 kernel_map_pages(page, 1 << order, 0);
77900
77901@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
77902 arch_alloc_page(page, order);
77903 kernel_map_pages(page, 1 << order, 1);
77904
77905+#ifndef CONFIG_PAX_MEMORY_SANITIZE
77906 if (gfp_flags & __GFP_ZERO)
77907 prep_zero_page(page, order, gfp_flags);
77908+#endif
77909
77910 if (order && (gfp_flags & __GFP_COMP))
77911 prep_compound_page(page, order);
77912@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
77913 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
77914 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
77915 }
77916+
77917+#ifdef CONFIG_PAX_MEMORY_SANITIZE
77918+ sanitize_highpage(page);
77919+#endif
77920+
77921 arch_free_page(page, 0);
77922 kernel_map_pages(page, 1, 0);
77923
77924@@ -2179,6 +2196,8 @@ void show_free_areas(void)
77925 int cpu;
77926 struct zone *zone;
77927
77928+ pax_track_stack();
77929+
77930 for_each_populated_zone(zone) {
77931 show_node(zone);
77932 printk("%s per-cpu:\n", zone->name);
77933@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
77934 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
77935 }
77936 #else
77937-static void inline setup_usemap(struct pglist_data *pgdat,
77938+static inline void setup_usemap(struct pglist_data *pgdat,
77939 struct zone *zone, unsigned long zonesize) {}
77940 #endif /* CONFIG_SPARSEMEM */
77941
77942diff --git a/mm/percpu.c b/mm/percpu.c
77943index 3bfd6e2..60404b9 100644
77944--- a/mm/percpu.c
77945+++ b/mm/percpu.c
77946@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu __read_mostly;
77947 static unsigned int pcpu_last_unit_cpu __read_mostly;
77948
77949 /* the address of the first chunk which starts with the kernel static area */
77950-void *pcpu_base_addr __read_mostly;
77951+void *pcpu_base_addr __read_only;
77952 EXPORT_SYMBOL_GPL(pcpu_base_addr);
77953
77954 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
77955diff --git a/mm/rmap.c b/mm/rmap.c
77956index dd43373..d848cd7 100644
77957--- a/mm/rmap.c
77958+++ b/mm/rmap.c
77959@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
77960 /* page_table_lock to protect against threads */
77961 spin_lock(&mm->page_table_lock);
77962 if (likely(!vma->anon_vma)) {
77963+
77964+#ifdef CONFIG_PAX_SEGMEXEC
77965+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
77966+
77967+ if (vma_m) {
77968+ BUG_ON(vma_m->anon_vma);
77969+ vma_m->anon_vma = anon_vma;
77970+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
77971+ }
77972+#endif
77973+
77974 vma->anon_vma = anon_vma;
77975 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
77976 allocated = NULL;
77977diff --git a/mm/shmem.c b/mm/shmem.c
77978index 3e0005b..1d659a8 100644
77979--- a/mm/shmem.c
77980+++ b/mm/shmem.c
77981@@ -31,7 +31,7 @@
77982 #include <linux/swap.h>
77983 #include <linux/ima.h>
77984
77985-static struct vfsmount *shm_mnt;
77986+struct vfsmount *shm_mnt;
77987
77988 #ifdef CONFIG_SHMEM
77989 /*
77990@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
77991 goto unlock;
77992 }
77993 entry = shmem_swp_entry(info, index, NULL);
77994+ if (!entry)
77995+ goto unlock;
77996 if (entry->val) {
77997 /*
77998 * The more uptodate page coming down from a stacked
77999@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
78000 struct vm_area_struct pvma;
78001 struct page *page;
78002
78003+ pax_track_stack();
78004+
78005 spol = mpol_cond_copy(&mpol,
78006 mpol_shared_policy_lookup(&info->policy, idx));
78007
78008@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
78009
78010 info = SHMEM_I(inode);
78011 inode->i_size = len-1;
78012- if (len <= (char *)inode - (char *)info) {
78013+ if (len <= (char *)inode - (char *)info && len <= 64) {
78014 /* do it inline */
78015 memcpy(info, symname, len);
78016 inode->i_op = &shmem_symlink_inline_operations;
78017@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
78018 int err = -ENOMEM;
78019
78020 /* Round up to L1_CACHE_BYTES to resist false sharing */
78021- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
78022- L1_CACHE_BYTES), GFP_KERNEL);
78023+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
78024 if (!sbinfo)
78025 return -ENOMEM;
78026
78027diff --git a/mm/slab.c b/mm/slab.c
78028index c8d466a..909e01e 100644
78029--- a/mm/slab.c
78030+++ b/mm/slab.c
78031@@ -174,7 +174,7 @@
78032
78033 /* Legal flag mask for kmem_cache_create(). */
78034 #if DEBUG
78035-# define CREATE_MASK (SLAB_RED_ZONE | \
78036+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
78037 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
78038 SLAB_CACHE_DMA | \
78039 SLAB_STORE_USER | \
78040@@ -182,7 +182,7 @@
78041 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78042 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
78043 #else
78044-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
78045+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
78046 SLAB_CACHE_DMA | \
78047 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
78048 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78049@@ -308,7 +308,7 @@ struct kmem_list3 {
78050 * Need this for bootstrapping a per node allocator.
78051 */
78052 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
78053-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
78054+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
78055 #define CACHE_CACHE 0
78056 #define SIZE_AC MAX_NUMNODES
78057 #define SIZE_L3 (2 * MAX_NUMNODES)
78058@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
78059 if ((x)->max_freeable < i) \
78060 (x)->max_freeable = i; \
78061 } while (0)
78062-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
78063-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
78064-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
78065-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
78066+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
78067+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
78068+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
78069+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
78070 #else
78071 #define STATS_INC_ACTIVE(x) do { } while (0)
78072 #define STATS_DEC_ACTIVE(x) do { } while (0)
78073@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
78074 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
78075 */
78076 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
78077- const struct slab *slab, void *obj)
78078+ const struct slab *slab, const void *obj)
78079 {
78080 u32 offset = (obj - slab->s_mem);
78081 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
78082@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
78083 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
78084 sizes[INDEX_AC].cs_size,
78085 ARCH_KMALLOC_MINALIGN,
78086- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78087+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78088 NULL);
78089
78090 if (INDEX_AC != INDEX_L3) {
78091@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
78092 kmem_cache_create(names[INDEX_L3].name,
78093 sizes[INDEX_L3].cs_size,
78094 ARCH_KMALLOC_MINALIGN,
78095- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78096+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78097 NULL);
78098 }
78099
78100@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
78101 sizes->cs_cachep = kmem_cache_create(names->name,
78102 sizes->cs_size,
78103 ARCH_KMALLOC_MINALIGN,
78104- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78105+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78106 NULL);
78107 }
78108 #ifdef CONFIG_ZONE_DMA
78109@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
78110 }
78111 /* cpu stats */
78112 {
78113- unsigned long allochit = atomic_read(&cachep->allochit);
78114- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
78115- unsigned long freehit = atomic_read(&cachep->freehit);
78116- unsigned long freemiss = atomic_read(&cachep->freemiss);
78117+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
78118+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
78119+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
78120+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
78121
78122 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
78123 allochit, allocmiss, freehit, freemiss);
78124@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
78125
78126 static int __init slab_proc_init(void)
78127 {
78128- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
78129+ mode_t gr_mode = S_IRUGO;
78130+
78131+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78132+ gr_mode = S_IRUSR;
78133+#endif
78134+
78135+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
78136 #ifdef CONFIG_DEBUG_SLAB_LEAK
78137- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
78138+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
78139 #endif
78140 return 0;
78141 }
78142 module_init(slab_proc_init);
78143 #endif
78144
78145+void check_object_size(const void *ptr, unsigned long n, bool to)
78146+{
78147+
78148+#ifdef CONFIG_PAX_USERCOPY
78149+ struct page *page;
78150+ struct kmem_cache *cachep = NULL;
78151+ struct slab *slabp;
78152+ unsigned int objnr;
78153+ unsigned long offset;
78154+ const char *type;
78155+
78156+ if (!n)
78157+ return;
78158+
78159+ type = "<null>";
78160+ if (ZERO_OR_NULL_PTR(ptr))
78161+ goto report;
78162+
78163+ if (!virt_addr_valid(ptr))
78164+ return;
78165+
78166+ page = virt_to_head_page(ptr);
78167+
78168+ type = "<process stack>";
78169+ if (!PageSlab(page)) {
78170+ if (object_is_on_stack(ptr, n) == -1)
78171+ goto report;
78172+ return;
78173+ }
78174+
78175+ cachep = page_get_cache(page);
78176+ type = cachep->name;
78177+ if (!(cachep->flags & SLAB_USERCOPY))
78178+ goto report;
78179+
78180+ slabp = page_get_slab(page);
78181+ objnr = obj_to_index(cachep, slabp, ptr);
78182+ BUG_ON(objnr >= cachep->num);
78183+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
78184+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
78185+ return;
78186+
78187+report:
78188+ pax_report_usercopy(ptr, n, to, type);
78189+#endif
78190+
78191+}
78192+EXPORT_SYMBOL(check_object_size);
78193+
78194 /**
78195 * ksize - get the actual amount of memory allocated for a given object
78196 * @objp: Pointer to the object
78197diff --git a/mm/slob.c b/mm/slob.c
78198index 837ebd6..4712174 100644
78199--- a/mm/slob.c
78200+++ b/mm/slob.c
78201@@ -29,7 +29,7 @@
78202 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
78203 * alloc_pages() directly, allocating compound pages so the page order
78204 * does not have to be separately tracked, and also stores the exact
78205- * allocation size in page->private so that it can be used to accurately
78206+ * allocation size in slob_page->size so that it can be used to accurately
78207 * provide ksize(). These objects are detected in kfree() because slob_page()
78208 * is false for them.
78209 *
78210@@ -58,6 +58,7 @@
78211 */
78212
78213 #include <linux/kernel.h>
78214+#include <linux/sched.h>
78215 #include <linux/slab.h>
78216 #include <linux/mm.h>
78217 #include <linux/swap.h> /* struct reclaim_state */
78218@@ -100,7 +101,8 @@ struct slob_page {
78219 unsigned long flags; /* mandatory */
78220 atomic_t _count; /* mandatory */
78221 slobidx_t units; /* free units left in page */
78222- unsigned long pad[2];
78223+ unsigned long pad[1];
78224+ unsigned long size; /* size when >=PAGE_SIZE */
78225 slob_t *free; /* first free slob_t in page */
78226 struct list_head list; /* linked list of free pages */
78227 };
78228@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
78229 */
78230 static inline int is_slob_page(struct slob_page *sp)
78231 {
78232- return PageSlab((struct page *)sp);
78233+ return PageSlab((struct page *)sp) && !sp->size;
78234 }
78235
78236 static inline void set_slob_page(struct slob_page *sp)
78237@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
78238
78239 static inline struct slob_page *slob_page(const void *addr)
78240 {
78241- return (struct slob_page *)virt_to_page(addr);
78242+ return (struct slob_page *)virt_to_head_page(addr);
78243 }
78244
78245 /*
78246@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
78247 /*
78248 * Return the size of a slob block.
78249 */
78250-static slobidx_t slob_units(slob_t *s)
78251+static slobidx_t slob_units(const slob_t *s)
78252 {
78253 if (s->units > 0)
78254 return s->units;
78255@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
78256 /*
78257 * Return the next free slob block pointer after this one.
78258 */
78259-static slob_t *slob_next(slob_t *s)
78260+static slob_t *slob_next(const slob_t *s)
78261 {
78262 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
78263 slobidx_t next;
78264@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
78265 /*
78266 * Returns true if s is the last free block in its page.
78267 */
78268-static int slob_last(slob_t *s)
78269+static int slob_last(const slob_t *s)
78270 {
78271 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
78272 }
78273@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
78274 if (!page)
78275 return NULL;
78276
78277+ set_slob_page(page);
78278 return page_address(page);
78279 }
78280
78281@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
78282 if (!b)
78283 return NULL;
78284 sp = slob_page(b);
78285- set_slob_page(sp);
78286
78287 spin_lock_irqsave(&slob_lock, flags);
78288 sp->units = SLOB_UNITS(PAGE_SIZE);
78289 sp->free = b;
78290+ sp->size = 0;
78291 INIT_LIST_HEAD(&sp->list);
78292 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
78293 set_slob_page_free(sp, slob_list);
78294@@ -475,10 +478,9 @@ out:
78295 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
78296 #endif
78297
78298-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78299+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
78300 {
78301- unsigned int *m;
78302- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78303+ slob_t *m;
78304 void *ret;
78305
78306 lockdep_trace_alloc(gfp);
78307@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78308
78309 if (!m)
78310 return NULL;
78311- *m = size;
78312+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
78313+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
78314+ m[0].units = size;
78315+ m[1].units = align;
78316 ret = (void *)m + align;
78317
78318 trace_kmalloc_node(_RET_IP_, ret,
78319@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78320
78321 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
78322 if (ret) {
78323- struct page *page;
78324- page = virt_to_page(ret);
78325- page->private = size;
78326+ struct slob_page *sp;
78327+ sp = slob_page(ret);
78328+ sp->size = size;
78329 }
78330
78331 trace_kmalloc_node(_RET_IP_, ret,
78332 size, PAGE_SIZE << order, gfp, node);
78333 }
78334
78335- kmemleak_alloc(ret, size, 1, gfp);
78336+ return ret;
78337+}
78338+
78339+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78340+{
78341+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78342+ void *ret = __kmalloc_node_align(size, gfp, node, align);
78343+
78344+ if (!ZERO_OR_NULL_PTR(ret))
78345+ kmemleak_alloc(ret, size, 1, gfp);
78346 return ret;
78347 }
78348 EXPORT_SYMBOL(__kmalloc_node);
78349@@ -528,13 +542,92 @@ void kfree(const void *block)
78350 sp = slob_page(block);
78351 if (is_slob_page(sp)) {
78352 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78353- unsigned int *m = (unsigned int *)(block - align);
78354- slob_free(m, *m + align);
78355- } else
78356+ slob_t *m = (slob_t *)(block - align);
78357+ slob_free(m, m[0].units + align);
78358+ } else {
78359+ clear_slob_page(sp);
78360+ free_slob_page(sp);
78361+ sp->size = 0;
78362 put_page(&sp->page);
78363+ }
78364 }
78365 EXPORT_SYMBOL(kfree);
78366
78367+void check_object_size(const void *ptr, unsigned long n, bool to)
78368+{
78369+
78370+#ifdef CONFIG_PAX_USERCOPY
78371+ struct slob_page *sp;
78372+ const slob_t *free;
78373+ const void *base;
78374+ unsigned long flags;
78375+ const char *type;
78376+
78377+ if (!n)
78378+ return;
78379+
78380+ type = "<null>";
78381+ if (ZERO_OR_NULL_PTR(ptr))
78382+ goto report;
78383+
78384+ if (!virt_addr_valid(ptr))
78385+ return;
78386+
78387+ type = "<process stack>";
78388+ sp = slob_page(ptr);
78389+ if (!PageSlab((struct page*)sp)) {
78390+ if (object_is_on_stack(ptr, n) == -1)
78391+ goto report;
78392+ return;
78393+ }
78394+
78395+ type = "<slob>";
78396+ if (sp->size) {
78397+ base = page_address(&sp->page);
78398+ if (base <= ptr && n <= sp->size - (ptr - base))
78399+ return;
78400+ goto report;
78401+ }
78402+
78403+ /* some tricky double walking to find the chunk */
78404+ spin_lock_irqsave(&slob_lock, flags);
78405+ base = (void *)((unsigned long)ptr & PAGE_MASK);
78406+ free = sp->free;
78407+
78408+ while (!slob_last(free) && (void *)free <= ptr) {
78409+ base = free + slob_units(free);
78410+ free = slob_next(free);
78411+ }
78412+
78413+ while (base < (void *)free) {
78414+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
78415+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
78416+ int offset;
78417+
78418+ if (ptr < base + align)
78419+ break;
78420+
78421+ offset = ptr - base - align;
78422+ if (offset >= m) {
78423+ base += size;
78424+ continue;
78425+ }
78426+
78427+ if (n > m - offset)
78428+ break;
78429+
78430+ spin_unlock_irqrestore(&slob_lock, flags);
78431+ return;
78432+ }
78433+
78434+ spin_unlock_irqrestore(&slob_lock, flags);
78435+report:
78436+ pax_report_usercopy(ptr, n, to, type);
78437+#endif
78438+
78439+}
78440+EXPORT_SYMBOL(check_object_size);
78441+
78442 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
78443 size_t ksize(const void *block)
78444 {
78445@@ -547,10 +640,10 @@ size_t ksize(const void *block)
78446 sp = slob_page(block);
78447 if (is_slob_page(sp)) {
78448 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78449- unsigned int *m = (unsigned int *)(block - align);
78450- return SLOB_UNITS(*m) * SLOB_UNIT;
78451+ slob_t *m = (slob_t *)(block - align);
78452+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
78453 } else
78454- return sp->page.private;
78455+ return sp->size;
78456 }
78457 EXPORT_SYMBOL(ksize);
78458
78459@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78460 {
78461 struct kmem_cache *c;
78462
78463+#ifdef CONFIG_PAX_USERCOPY
78464+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
78465+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
78466+#else
78467 c = slob_alloc(sizeof(struct kmem_cache),
78468 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
78469+#endif
78470
78471 if (c) {
78472 c->name = name;
78473@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
78474 {
78475 void *b;
78476
78477+#ifdef CONFIG_PAX_USERCOPY
78478+ b = __kmalloc_node_align(c->size, flags, node, c->align);
78479+#else
78480 if (c->size < PAGE_SIZE) {
78481 b = slob_alloc(c->size, flags, c->align, node);
78482 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
78483 SLOB_UNITS(c->size) * SLOB_UNIT,
78484 flags, node);
78485 } else {
78486+ struct slob_page *sp;
78487+
78488 b = slob_new_pages(flags, get_order(c->size), node);
78489+ sp = slob_page(b);
78490+ sp->size = c->size;
78491 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
78492 PAGE_SIZE << get_order(c->size),
78493 flags, node);
78494 }
78495+#endif
78496
78497 if (c->ctor)
78498 c->ctor(b);
78499@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
78500
78501 static void __kmem_cache_free(void *b, int size)
78502 {
78503- if (size < PAGE_SIZE)
78504+ struct slob_page *sp = slob_page(b);
78505+
78506+ if (is_slob_page(sp))
78507 slob_free(b, size);
78508- else
78509+ else {
78510+ clear_slob_page(sp);
78511+ free_slob_page(sp);
78512+ sp->size = 0;
78513 slob_free_pages(b, get_order(size));
78514+ }
78515 }
78516
78517 static void kmem_rcu_free(struct rcu_head *head)
78518@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
78519
78520 void kmem_cache_free(struct kmem_cache *c, void *b)
78521 {
78522+ int size = c->size;
78523+
78524+#ifdef CONFIG_PAX_USERCOPY
78525+ if (size + c->align < PAGE_SIZE) {
78526+ size += c->align;
78527+ b -= c->align;
78528+ }
78529+#endif
78530+
78531 kmemleak_free_recursive(b, c->flags);
78532 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
78533 struct slob_rcu *slob_rcu;
78534- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
78535+ slob_rcu = b + (size - sizeof(struct slob_rcu));
78536 INIT_RCU_HEAD(&slob_rcu->head);
78537- slob_rcu->size = c->size;
78538+ slob_rcu->size = size;
78539 call_rcu(&slob_rcu->head, kmem_rcu_free);
78540 } else {
78541- __kmem_cache_free(b, c->size);
78542+ __kmem_cache_free(b, size);
78543 }
78544
78545+#ifdef CONFIG_PAX_USERCOPY
78546+ trace_kfree(_RET_IP_, b);
78547+#else
78548 trace_kmem_cache_free(_RET_IP_, b);
78549+#endif
78550+
78551 }
78552 EXPORT_SYMBOL(kmem_cache_free);
78553
78554diff --git a/mm/slub.c b/mm/slub.c
78555index 4996fc7..87e01d0 100644
78556--- a/mm/slub.c
78557+++ b/mm/slub.c
78558@@ -201,7 +201,7 @@ struct track {
78559
78560 enum track_item { TRACK_ALLOC, TRACK_FREE };
78561
78562-#ifdef CONFIG_SLUB_DEBUG
78563+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78564 static int sysfs_slab_add(struct kmem_cache *);
78565 static int sysfs_slab_alias(struct kmem_cache *, const char *);
78566 static void sysfs_slab_remove(struct kmem_cache *);
78567@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
78568 if (!t->addr)
78569 return;
78570
78571- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
78572+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
78573 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
78574 }
78575
78576@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
78577
78578 page = virt_to_head_page(x);
78579
78580+ BUG_ON(!PageSlab(page));
78581+
78582 slab_free(s, page, x, _RET_IP_);
78583
78584 trace_kmem_cache_free(_RET_IP_, x);
78585@@ -1937,7 +1939,7 @@ static int slub_min_objects;
78586 * Merge control. If this is set then no merging of slab caches will occur.
78587 * (Could be removed. This was introduced to pacify the merge skeptics.)
78588 */
78589-static int slub_nomerge;
78590+static int slub_nomerge = 1;
78591
78592 /*
78593 * Calculate the order of allocation given an slab object size.
78594@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
78595 * list to avoid pounding the page allocator excessively.
78596 */
78597 set_min_partial(s, ilog2(s->size));
78598- s->refcount = 1;
78599+ atomic_set(&s->refcount, 1);
78600 #ifdef CONFIG_NUMA
78601 s->remote_node_defrag_ratio = 1000;
78602 #endif
78603@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
78604 void kmem_cache_destroy(struct kmem_cache *s)
78605 {
78606 down_write(&slub_lock);
78607- s->refcount--;
78608- if (!s->refcount) {
78609+ if (atomic_dec_and_test(&s->refcount)) {
78610 list_del(&s->list);
78611 up_write(&slub_lock);
78612 if (kmem_cache_close(s)) {
78613@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
78614 __setup("slub_nomerge", setup_slub_nomerge);
78615
78616 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
78617- const char *name, int size, gfp_t gfp_flags)
78618+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
78619 {
78620- unsigned int flags = 0;
78621-
78622 if (gfp_flags & SLUB_DMA)
78623- flags = SLAB_CACHE_DMA;
78624+ flags |= SLAB_CACHE_DMA;
78625
78626 /*
78627 * This function is called with IRQs disabled during early-boot on
78628@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
78629 EXPORT_SYMBOL(__kmalloc_node);
78630 #endif
78631
78632+void check_object_size(const void *ptr, unsigned long n, bool to)
78633+{
78634+
78635+#ifdef CONFIG_PAX_USERCOPY
78636+ struct page *page;
78637+ struct kmem_cache *s = NULL;
78638+ unsigned long offset;
78639+ const char *type;
78640+
78641+ if (!n)
78642+ return;
78643+
78644+ type = "<null>";
78645+ if (ZERO_OR_NULL_PTR(ptr))
78646+ goto report;
78647+
78648+ if (!virt_addr_valid(ptr))
78649+ return;
78650+
78651+ page = get_object_page(ptr);
78652+
78653+ type = "<process stack>";
78654+ if (!page) {
78655+ if (object_is_on_stack(ptr, n) == -1)
78656+ goto report;
78657+ return;
78658+ }
78659+
78660+ s = page->slab;
78661+ type = s->name;
78662+ if (!(s->flags & SLAB_USERCOPY))
78663+ goto report;
78664+
78665+ offset = (ptr - page_address(page)) % s->size;
78666+ if (offset <= s->objsize && n <= s->objsize - offset)
78667+ return;
78668+
78669+report:
78670+ pax_report_usercopy(ptr, n, to, type);
78671+#endif
78672+
78673+}
78674+EXPORT_SYMBOL(check_object_size);
78675+
78676 size_t ksize(const void *object)
78677 {
78678 struct page *page;
78679@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
78680 * kmem_cache_open for slab_state == DOWN.
78681 */
78682 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
78683- sizeof(struct kmem_cache_node), GFP_NOWAIT);
78684- kmalloc_caches[0].refcount = -1;
78685+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
78686+ atomic_set(&kmalloc_caches[0].refcount, -1);
78687 caches++;
78688
78689 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
78690@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
78691 /* Caches that are not of the two-to-the-power-of size */
78692 if (KMALLOC_MIN_SIZE <= 32) {
78693 create_kmalloc_cache(&kmalloc_caches[1],
78694- "kmalloc-96", 96, GFP_NOWAIT);
78695+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
78696 caches++;
78697 }
78698 if (KMALLOC_MIN_SIZE <= 64) {
78699 create_kmalloc_cache(&kmalloc_caches[2],
78700- "kmalloc-192", 192, GFP_NOWAIT);
78701+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
78702 caches++;
78703 }
78704
78705 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
78706 create_kmalloc_cache(&kmalloc_caches[i],
78707- "kmalloc", 1 << i, GFP_NOWAIT);
78708+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
78709 caches++;
78710 }
78711
78712@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
78713 /*
78714 * We may have set a slab to be unmergeable during bootstrap.
78715 */
78716- if (s->refcount < 0)
78717+ if (atomic_read(&s->refcount) < 0)
78718 return 1;
78719
78720 return 0;
78721@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78722 if (s) {
78723 int cpu;
78724
78725- s->refcount++;
78726+ atomic_inc(&s->refcount);
78727 /*
78728 * Adjust the object sizes so that we clear
78729 * the complete object on kzalloc.
78730@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78731
78732 if (sysfs_slab_alias(s, name)) {
78733 down_write(&slub_lock);
78734- s->refcount--;
78735+ atomic_dec(&s->refcount);
78736 up_write(&slub_lock);
78737 goto err;
78738 }
78739@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
78740
78741 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
78742 {
78743- return sprintf(buf, "%d\n", s->refcount - 1);
78744+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
78745 }
78746 SLAB_ATTR_RO(aliases);
78747
78748@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
78749 kfree(s);
78750 }
78751
78752-static struct sysfs_ops slab_sysfs_ops = {
78753+static const struct sysfs_ops slab_sysfs_ops = {
78754 .show = slab_attr_show,
78755 .store = slab_attr_store,
78756 };
78757@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
78758 return 0;
78759 }
78760
78761-static struct kset_uevent_ops slab_uevent_ops = {
78762+static const struct kset_uevent_ops slab_uevent_ops = {
78763 .filter = uevent_filter,
78764 };
78765
78766@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
78767 return name;
78768 }
78769
78770+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78771 static int sysfs_slab_add(struct kmem_cache *s)
78772 {
78773 int err;
78774@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
78775 kobject_del(&s->kobj);
78776 kobject_put(&s->kobj);
78777 }
78778+#endif
78779
78780 /*
78781 * Need to buffer aliases during bootup until sysfs becomes
78782@@ -4632,6 +4677,7 @@ struct saved_alias {
78783
78784 static struct saved_alias *alias_list;
78785
78786+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
78787 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
78788 {
78789 struct saved_alias *al;
78790@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
78791 alias_list = al;
78792 return 0;
78793 }
78794+#endif
78795
78796 static int __init slab_sysfs_init(void)
78797 {
78798@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
78799
78800 static int __init slab_proc_init(void)
78801 {
78802- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
78803+ mode_t gr_mode = S_IRUGO;
78804+
78805+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78806+ gr_mode = S_IRUSR;
78807+#endif
78808+
78809+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
78810 return 0;
78811 }
78812 module_init(slab_proc_init);
78813diff --git a/mm/swap.c b/mm/swap.c
78814index 308e57d..5de19c0 100644
78815--- a/mm/swap.c
78816+++ b/mm/swap.c
78817@@ -30,6 +30,7 @@
78818 #include <linux/notifier.h>
78819 #include <linux/backing-dev.h>
78820 #include <linux/memcontrol.h>
78821+#include <linux/hugetlb.h>
78822
78823 #include "internal.h"
78824
78825@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
78826 compound_page_dtor *dtor;
78827
78828 dtor = get_compound_page_dtor(page);
78829+ if (!PageHuge(page))
78830+ BUG_ON(dtor != free_compound_page);
78831 (*dtor)(page);
78832 }
78833 }
78834diff --git a/mm/util.c b/mm/util.c
78835index b377ce4..3a891af 100644
78836--- a/mm/util.c
78837+++ b/mm/util.c
78838@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
78839 void arch_pick_mmap_layout(struct mm_struct *mm)
78840 {
78841 mm->mmap_base = TASK_UNMAPPED_BASE;
78842+
78843+#ifdef CONFIG_PAX_RANDMMAP
78844+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78845+ mm->mmap_base += mm->delta_mmap;
78846+#endif
78847+
78848 mm->get_unmapped_area = arch_get_unmapped_area;
78849 mm->unmap_area = arch_unmap_area;
78850 }
78851diff --git a/mm/vmalloc.c b/mm/vmalloc.c
78852index f34ffd0..28e94b7 100644
78853--- a/mm/vmalloc.c
78854+++ b/mm/vmalloc.c
78855@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
78856
78857 pte = pte_offset_kernel(pmd, addr);
78858 do {
78859- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78860- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78861+
78862+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78863+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
78864+ BUG_ON(!pte_exec(*pte));
78865+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
78866+ continue;
78867+ }
78868+#endif
78869+
78870+ {
78871+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78872+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78873+ }
78874 } while (pte++, addr += PAGE_SIZE, addr != end);
78875 }
78876
78877@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
78878 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
78879 {
78880 pte_t *pte;
78881+ int ret = -ENOMEM;
78882
78883 /*
78884 * nr is a running index into the array which helps higher level
78885@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
78886 pte = pte_alloc_kernel(pmd, addr);
78887 if (!pte)
78888 return -ENOMEM;
78889+
78890+ pax_open_kernel();
78891 do {
78892 struct page *page = pages[*nr];
78893
78894- if (WARN_ON(!pte_none(*pte)))
78895- return -EBUSY;
78896- if (WARN_ON(!page))
78897- return -ENOMEM;
78898+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78899+ if (!(pgprot_val(prot) & _PAGE_NX))
78900+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
78901+ else
78902+#endif
78903+
78904+ if (WARN_ON(!pte_none(*pte))) {
78905+ ret = -EBUSY;
78906+ goto out;
78907+ }
78908+ if (WARN_ON(!page)) {
78909+ ret = -ENOMEM;
78910+ goto out;
78911+ }
78912 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
78913 (*nr)++;
78914 } while (pte++, addr += PAGE_SIZE, addr != end);
78915- return 0;
78916+ ret = 0;
78917+out:
78918+ pax_close_kernel();
78919+ return ret;
78920 }
78921
78922 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
78923@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
78924 * and fall back on vmalloc() if that fails. Others
78925 * just put it in the vmalloc space.
78926 */
78927-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
78928+#ifdef CONFIG_MODULES
78929+#ifdef MODULES_VADDR
78930 unsigned long addr = (unsigned long)x;
78931 if (addr >= MODULES_VADDR && addr < MODULES_END)
78932 return 1;
78933 #endif
78934+
78935+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78936+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
78937+ return 1;
78938+#endif
78939+
78940+#endif
78941+
78942 return is_vmalloc_addr(x);
78943 }
78944
78945@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
78946
78947 if (!pgd_none(*pgd)) {
78948 pud_t *pud = pud_offset(pgd, addr);
78949+#ifdef CONFIG_X86
78950+ if (!pud_large(*pud))
78951+#endif
78952 if (!pud_none(*pud)) {
78953 pmd_t *pmd = pmd_offset(pud, addr);
78954+#ifdef CONFIG_X86
78955+ if (!pmd_large(*pmd))
78956+#endif
78957 if (!pmd_none(*pmd)) {
78958 pte_t *ptep, pte;
78959
78960@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
78961 struct rb_node *tmp;
78962
78963 while (*p) {
78964- struct vmap_area *tmp;
78965+ struct vmap_area *varea;
78966
78967 parent = *p;
78968- tmp = rb_entry(parent, struct vmap_area, rb_node);
78969- if (va->va_start < tmp->va_end)
78970+ varea = rb_entry(parent, struct vmap_area, rb_node);
78971+ if (va->va_start < varea->va_end)
78972 p = &(*p)->rb_left;
78973- else if (va->va_end > tmp->va_start)
78974+ else if (va->va_end > varea->va_start)
78975 p = &(*p)->rb_right;
78976 else
78977 BUG();
78978@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
78979 struct vm_struct *area;
78980
78981 BUG_ON(in_interrupt());
78982+
78983+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78984+ if (flags & VM_KERNEXEC) {
78985+ if (start != VMALLOC_START || end != VMALLOC_END)
78986+ return NULL;
78987+ start = (unsigned long)MODULES_EXEC_VADDR;
78988+ end = (unsigned long)MODULES_EXEC_END;
78989+ }
78990+#endif
78991+
78992 if (flags & VM_IOREMAP) {
78993 int bit = fls(size);
78994
78995@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
78996 if (count > totalram_pages)
78997 return NULL;
78998
78999+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79000+ if (!(pgprot_val(prot) & _PAGE_NX))
79001+ flags |= VM_KERNEXEC;
79002+#endif
79003+
79004 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
79005 __builtin_return_address(0));
79006 if (!area)
79007@@ -1594,6 +1651,13 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
79008 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
79009 return NULL;
79010
79011+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79012+ if (!(pgprot_val(prot) & _PAGE_NX))
79013+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
79014+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
79015+ else
79016+#endif
79017+
79018 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
79019 VMALLOC_START, VMALLOC_END, node,
79020 gfp_mask, caller);
79021@@ -1619,6 +1683,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
79022 return addr;
79023 }
79024
79025+#undef __vmalloc
79026 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
79027 {
79028 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
79029@@ -1635,6 +1700,7 @@ EXPORT_SYMBOL(__vmalloc);
79030 * For tight control over page level allocator and protection flags
79031 * use __vmalloc() instead.
79032 */
79033+#undef vmalloc
79034 void *vmalloc(unsigned long size)
79035 {
79036 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79037@@ -1649,6 +1715,7 @@ EXPORT_SYMBOL(vmalloc);
79038 * The resulting memory area is zeroed so it can be mapped to userspace
79039 * without leaking data.
79040 */
79041+#undef vmalloc_user
79042 void *vmalloc_user(unsigned long size)
79043 {
79044 struct vm_struct *area;
79045@@ -1676,6 +1743,7 @@ EXPORT_SYMBOL(vmalloc_user);
79046 * For tight control over page level allocator and protection flags
79047 * use __vmalloc() instead.
79048 */
79049+#undef vmalloc_node
79050 void *vmalloc_node(unsigned long size, int node)
79051 {
79052 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79053@@ -1698,10 +1766,10 @@ EXPORT_SYMBOL(vmalloc_node);
79054 * For tight control over page level allocator and protection flags
79055 * use __vmalloc() instead.
79056 */
79057-
79058+#undef vmalloc_exec
79059 void *vmalloc_exec(unsigned long size)
79060 {
79061- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
79062+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
79063 -1, __builtin_return_address(0));
79064 }
79065
79066@@ -1720,6 +1788,7 @@ void *vmalloc_exec(unsigned long size)
79067 * Allocate enough 32bit PA addressable pages to cover @size from the
79068 * page level allocator and map them into contiguous kernel virtual space.
79069 */
79070+#undef vmalloc_32
79071 void *vmalloc_32(unsigned long size)
79072 {
79073 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
79074@@ -1734,6 +1803,7 @@ EXPORT_SYMBOL(vmalloc_32);
79075 * The resulting memory area is 32bit addressable and zeroed so it can be
79076 * mapped to userspace without leaking data.
79077 */
79078+#undef vmalloc_32_user
79079 void *vmalloc_32_user(unsigned long size)
79080 {
79081 struct vm_struct *area;
79082@@ -1998,6 +2068,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
79083 unsigned long uaddr = vma->vm_start;
79084 unsigned long usize = vma->vm_end - vma->vm_start;
79085
79086+ BUG_ON(vma->vm_mirror);
79087+
79088 if ((PAGE_SIZE-1) & (unsigned long)addr)
79089 return -EINVAL;
79090
79091diff --git a/mm/vmstat.c b/mm/vmstat.c
79092index 42d76c6..5643dc4 100644
79093--- a/mm/vmstat.c
79094+++ b/mm/vmstat.c
79095@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
79096 *
79097 * vm_stat contains the global counters
79098 */
79099-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79100+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79101 EXPORT_SYMBOL(vm_stat);
79102
79103 #ifdef CONFIG_SMP
79104@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
79105 v = p->vm_stat_diff[i];
79106 p->vm_stat_diff[i] = 0;
79107 local_irq_restore(flags);
79108- atomic_long_add(v, &zone->vm_stat[i]);
79109+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
79110 global_diff[i] += v;
79111 #ifdef CONFIG_NUMA
79112 /* 3 seconds idle till flush */
79113@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
79114
79115 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
79116 if (global_diff[i])
79117- atomic_long_add(global_diff[i], &vm_stat[i]);
79118+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
79119 }
79120
79121 #endif
79122@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
79123 start_cpu_timer(cpu);
79124 #endif
79125 #ifdef CONFIG_PROC_FS
79126- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
79127- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
79128- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
79129- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
79130+ {
79131+ mode_t gr_mode = S_IRUGO;
79132+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79133+ gr_mode = S_IRUSR;
79134+#endif
79135+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
79136+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
79137+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
79138+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
79139+#else
79140+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
79141+#endif
79142+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
79143+ }
79144 #endif
79145 return 0;
79146 }
79147diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
79148index a29c5ab..6143f20 100644
79149--- a/net/8021q/vlan.c
79150+++ b/net/8021q/vlan.c
79151@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
79152 err = -EPERM;
79153 if (!capable(CAP_NET_ADMIN))
79154 break;
79155- if ((args.u.name_type >= 0) &&
79156- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
79157+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
79158 struct vlan_net *vn;
79159
79160 vn = net_generic(net, vlan_net_id);
79161diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
79162index a2d2984..f9eb711 100644
79163--- a/net/9p/trans_fd.c
79164+++ b/net/9p/trans_fd.c
79165@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
79166 oldfs = get_fs();
79167 set_fs(get_ds());
79168 /* The cast to a user pointer is valid due to the set_fs() */
79169- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
79170+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
79171 set_fs(oldfs);
79172
79173 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
79174diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
79175index 02cc7e7..4514f1b 100644
79176--- a/net/atm/atm_misc.c
79177+++ b/net/atm/atm_misc.c
79178@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
79179 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
79180 return 1;
79181 atm_return(vcc,truesize);
79182- atomic_inc(&vcc->stats->rx_drop);
79183+ atomic_inc_unchecked(&vcc->stats->rx_drop);
79184 return 0;
79185 }
79186
79187@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
79188 }
79189 }
79190 atm_return(vcc,guess);
79191- atomic_inc(&vcc->stats->rx_drop);
79192+ atomic_inc_unchecked(&vcc->stats->rx_drop);
79193 return NULL;
79194 }
79195
79196@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
79197
79198 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79199 {
79200-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79201+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79202 __SONET_ITEMS
79203 #undef __HANDLE_ITEM
79204 }
79205@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79206
79207 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79208 {
79209-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
79210+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
79211 __SONET_ITEMS
79212 #undef __HANDLE_ITEM
79213 }
79214diff --git a/net/atm/lec.h b/net/atm/lec.h
79215index 9d14d19..5c145f3 100644
79216--- a/net/atm/lec.h
79217+++ b/net/atm/lec.h
79218@@ -48,7 +48,7 @@ struct lane2_ops {
79219 const u8 *tlvs, u32 sizeoftlvs);
79220 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
79221 const u8 *tlvs, u32 sizeoftlvs);
79222-};
79223+} __no_const;
79224
79225 /*
79226 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
79227diff --git a/net/atm/mpc.h b/net/atm/mpc.h
79228index 0919a88..a23d54e 100644
79229--- a/net/atm/mpc.h
79230+++ b/net/atm/mpc.h
79231@@ -33,7 +33,7 @@ struct mpoa_client {
79232 struct mpc_parameters parameters; /* parameters for this client */
79233
79234 const struct net_device_ops *old_ops;
79235- struct net_device_ops new_ops;
79236+ net_device_ops_no_const new_ops;
79237 };
79238
79239
79240diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
79241index 4504a4b..1733f1e 100644
79242--- a/net/atm/mpoa_caches.c
79243+++ b/net/atm/mpoa_caches.c
79244@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
79245 struct timeval now;
79246 struct k_message msg;
79247
79248+ pax_track_stack();
79249+
79250 do_gettimeofday(&now);
79251
79252 write_lock_irq(&client->egress_lock);
79253diff --git a/net/atm/proc.c b/net/atm/proc.c
79254index ab8419a..aa91497 100644
79255--- a/net/atm/proc.c
79256+++ b/net/atm/proc.c
79257@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
79258 const struct k_atm_aal_stats *stats)
79259 {
79260 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
79261- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
79262- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
79263- atomic_read(&stats->rx_drop));
79264+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
79265+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
79266+ atomic_read_unchecked(&stats->rx_drop));
79267 }
79268
79269 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
79270@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
79271 {
79272 struct sock *sk = sk_atm(vcc);
79273
79274+#ifdef CONFIG_GRKERNSEC_HIDESYM
79275+ seq_printf(seq, "%p ", NULL);
79276+#else
79277 seq_printf(seq, "%p ", vcc);
79278+#endif
79279+
79280 if (!vcc->dev)
79281 seq_printf(seq, "Unassigned ");
79282 else
79283@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
79284 {
79285 if (!vcc->dev)
79286 seq_printf(seq, sizeof(void *) == 4 ?
79287+#ifdef CONFIG_GRKERNSEC_HIDESYM
79288+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
79289+#else
79290 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
79291+#endif
79292 else
79293 seq_printf(seq, "%3d %3d %5d ",
79294 vcc->dev->number, vcc->vpi, vcc->vci);
79295diff --git a/net/atm/resources.c b/net/atm/resources.c
79296index 56b7322..c48b84e 100644
79297--- a/net/atm/resources.c
79298+++ b/net/atm/resources.c
79299@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
79300 static void copy_aal_stats(struct k_atm_aal_stats *from,
79301 struct atm_aal_stats *to)
79302 {
79303-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79304+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79305 __AAL_STAT_ITEMS
79306 #undef __HANDLE_ITEM
79307 }
79308@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
79309 static void subtract_aal_stats(struct k_atm_aal_stats *from,
79310 struct atm_aal_stats *to)
79311 {
79312-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
79313+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
79314 __AAL_STAT_ITEMS
79315 #undef __HANDLE_ITEM
79316 }
79317diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
79318index 8567d47..bba2292 100644
79319--- a/net/bridge/br_private.h
79320+++ b/net/bridge/br_private.h
79321@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
79322
79323 #ifdef CONFIG_SYSFS
79324 /* br_sysfs_if.c */
79325-extern struct sysfs_ops brport_sysfs_ops;
79326+extern const struct sysfs_ops brport_sysfs_ops;
79327 extern int br_sysfs_addif(struct net_bridge_port *p);
79328
79329 /* br_sysfs_br.c */
79330diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
79331index 9a52ac5..c97538e 100644
79332--- a/net/bridge/br_stp_if.c
79333+++ b/net/bridge/br_stp_if.c
79334@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
79335 char *envp[] = { NULL };
79336
79337 if (br->stp_enabled == BR_USER_STP) {
79338- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
79339+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
79340 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
79341 br->dev->name, r);
79342
79343diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
79344index 820643a..ce77fb3 100644
79345--- a/net/bridge/br_sysfs_if.c
79346+++ b/net/bridge/br_sysfs_if.c
79347@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
79348 return ret;
79349 }
79350
79351-struct sysfs_ops brport_sysfs_ops = {
79352+const struct sysfs_ops brport_sysfs_ops = {
79353 .show = brport_show,
79354 .store = brport_store,
79355 };
79356diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
79357index d73d47f..72df42a 100644
79358--- a/net/bridge/netfilter/ebtables.c
79359+++ b/net/bridge/netfilter/ebtables.c
79360@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
79361 unsigned int entries_size, nentries;
79362 char *entries;
79363
79364+ pax_track_stack();
79365+
79366 if (cmd == EBT_SO_GET_ENTRIES) {
79367 entries_size = t->private->entries_size;
79368 nentries = t->private->nentries;
79369diff --git a/net/can/bcm.c b/net/can/bcm.c
79370index 2ffd2e0..72a7486 100644
79371--- a/net/can/bcm.c
79372+++ b/net/can/bcm.c
79373@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
79374 struct bcm_sock *bo = bcm_sk(sk);
79375 struct bcm_op *op;
79376
79377+#ifdef CONFIG_GRKERNSEC_HIDESYM
79378+ seq_printf(m, ">>> socket %p", NULL);
79379+ seq_printf(m, " / sk %p", NULL);
79380+ seq_printf(m, " / bo %p", NULL);
79381+#else
79382 seq_printf(m, ">>> socket %p", sk->sk_socket);
79383 seq_printf(m, " / sk %p", sk);
79384 seq_printf(m, " / bo %p", bo);
79385+#endif
79386 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
79387 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
79388 seq_printf(m, " <<<\n");
79389diff --git a/net/compat.c b/net/compat.c
79390index 9559afc..ccd74e1 100644
79391--- a/net/compat.c
79392+++ b/net/compat.c
79393@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
79394 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
79395 __get_user(kmsg->msg_flags, &umsg->msg_flags))
79396 return -EFAULT;
79397- kmsg->msg_name = compat_ptr(tmp1);
79398- kmsg->msg_iov = compat_ptr(tmp2);
79399- kmsg->msg_control = compat_ptr(tmp3);
79400+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
79401+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
79402+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
79403 return 0;
79404 }
79405
79406@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
79407 kern_msg->msg_name = NULL;
79408
79409 tot_len = iov_from_user_compat_to_kern(kern_iov,
79410- (struct compat_iovec __user *)kern_msg->msg_iov,
79411+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
79412 kern_msg->msg_iovlen);
79413 if (tot_len >= 0)
79414 kern_msg->msg_iov = kern_iov;
79415@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
79416
79417 #define CMSG_COMPAT_FIRSTHDR(msg) \
79418 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
79419- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
79420+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
79421 (struct compat_cmsghdr __user *)NULL)
79422
79423 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
79424 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
79425 (ucmlen) <= (unsigned long) \
79426 ((mhdr)->msg_controllen - \
79427- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
79428+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
79429
79430 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
79431 struct compat_cmsghdr __user *cmsg, int cmsg_len)
79432 {
79433 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
79434- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
79435+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
79436 msg->msg_controllen)
79437 return NULL;
79438 return (struct compat_cmsghdr __user *)ptr;
79439@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
79440 {
79441 struct compat_timeval ctv;
79442 struct compat_timespec cts[3];
79443- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
79444+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
79445 struct compat_cmsghdr cmhdr;
79446 int cmlen;
79447
79448@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
79449
79450 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
79451 {
79452- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
79453+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
79454 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
79455 int fdnum = scm->fp->count;
79456 struct file **fp = scm->fp->fp;
79457@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
79458 len = sizeof(ktime);
79459 old_fs = get_fs();
79460 set_fs(KERNEL_DS);
79461- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
79462+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
79463 set_fs(old_fs);
79464
79465 if (!err) {
79466@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79467 case MCAST_JOIN_GROUP:
79468 case MCAST_LEAVE_GROUP:
79469 {
79470- struct compat_group_req __user *gr32 = (void *)optval;
79471+ struct compat_group_req __user *gr32 = (void __user *)optval;
79472 struct group_req __user *kgr =
79473 compat_alloc_user_space(sizeof(struct group_req));
79474 u32 interface;
79475@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79476 case MCAST_BLOCK_SOURCE:
79477 case MCAST_UNBLOCK_SOURCE:
79478 {
79479- struct compat_group_source_req __user *gsr32 = (void *)optval;
79480+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
79481 struct group_source_req __user *kgsr = compat_alloc_user_space(
79482 sizeof(struct group_source_req));
79483 u32 interface;
79484@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79485 }
79486 case MCAST_MSFILTER:
79487 {
79488- struct compat_group_filter __user *gf32 = (void *)optval;
79489+ struct compat_group_filter __user *gf32 = (void __user *)optval;
79490 struct group_filter __user *kgf;
79491 u32 interface, fmode, numsrc;
79492
79493diff --git a/net/core/dev.c b/net/core/dev.c
79494index 64eb849..7b5948b 100644
79495--- a/net/core/dev.c
79496+++ b/net/core/dev.c
79497@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
79498 if (no_module && capable(CAP_NET_ADMIN))
79499 no_module = request_module("netdev-%s", name);
79500 if (no_module && capable(CAP_SYS_MODULE)) {
79501+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79502+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
79503+#else
79504 if (!request_module("%s", name))
79505 pr_err("Loading kernel module for a network device "
79506 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
79507 "instead\n", name);
79508+#endif
79509 }
79510 }
79511 EXPORT_SYMBOL(dev_load);
79512@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
79513
79514 struct dev_gso_cb {
79515 void (*destructor)(struct sk_buff *skb);
79516-};
79517+} __no_const;
79518
79519 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
79520
79521@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
79522 }
79523 EXPORT_SYMBOL(netif_rx_ni);
79524
79525-static void net_tx_action(struct softirq_action *h)
79526+static void net_tx_action(void)
79527 {
79528 struct softnet_data *sd = &__get_cpu_var(softnet_data);
79529
79530@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *napi)
79531 EXPORT_SYMBOL(netif_napi_del);
79532
79533
79534-static void net_rx_action(struct softirq_action *h)
79535+static void net_rx_action(void)
79536 {
79537 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
79538 unsigned long time_limit = jiffies + 2;
79539diff --git a/net/core/flow.c b/net/core/flow.c
79540index 9601587..8c4824e 100644
79541--- a/net/core/flow.c
79542+++ b/net/core/flow.c
79543@@ -35,11 +35,11 @@ struct flow_cache_entry {
79544 atomic_t *object_ref;
79545 };
79546
79547-atomic_t flow_cache_genid = ATOMIC_INIT(0);
79548+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
79549
79550 static u32 flow_hash_shift;
79551 #define flow_hash_size (1 << flow_hash_shift)
79552-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
79553+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
79554
79555 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
79556
79557@@ -52,7 +52,7 @@ struct flow_percpu_info {
79558 u32 hash_rnd;
79559 int count;
79560 };
79561-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
79562+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
79563
79564 #define flow_hash_rnd_recalc(cpu) \
79565 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
79566@@ -69,7 +69,7 @@ struct flow_flush_info {
79567 atomic_t cpuleft;
79568 struct completion completion;
79569 };
79570-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
79571+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
79572
79573 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
79574
79575@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
79576 if (fle->family == family &&
79577 fle->dir == dir &&
79578 flow_key_compare(key, &fle->key) == 0) {
79579- if (fle->genid == atomic_read(&flow_cache_genid)) {
79580+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
79581 void *ret = fle->object;
79582
79583 if (ret)
79584@@ -228,7 +228,7 @@ nocache:
79585 err = resolver(net, key, family, dir, &obj, &obj_ref);
79586
79587 if (fle && !err) {
79588- fle->genid = atomic_read(&flow_cache_genid);
79589+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
79590
79591 if (fle->object)
79592 atomic_dec(fle->object_ref);
79593@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
79594
79595 fle = flow_table(cpu)[i];
79596 for (; fle; fle = fle->next) {
79597- unsigned genid = atomic_read(&flow_cache_genid);
79598+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
79599
79600 if (!fle->object || fle->genid == genid)
79601 continue;
79602diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
79603index d4fd895..ac9b1e6 100644
79604--- a/net/core/rtnetlink.c
79605+++ b/net/core/rtnetlink.c
79606@@ -57,7 +57,7 @@ struct rtnl_link
79607 {
79608 rtnl_doit_func doit;
79609 rtnl_dumpit_func dumpit;
79610-};
79611+} __no_const;
79612
79613 static DEFINE_MUTEX(rtnl_mutex);
79614
79615diff --git a/net/core/scm.c b/net/core/scm.c
79616index d98eafc..1a190a9 100644
79617--- a/net/core/scm.c
79618+++ b/net/core/scm.c
79619@@ -191,7 +191,7 @@ error:
79620 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79621 {
79622 struct cmsghdr __user *cm
79623- = (__force struct cmsghdr __user *)msg->msg_control;
79624+ = (struct cmsghdr __force_user *)msg->msg_control;
79625 struct cmsghdr cmhdr;
79626 int cmlen = CMSG_LEN(len);
79627 int err;
79628@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79629 err = -EFAULT;
79630 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
79631 goto out;
79632- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
79633+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
79634 goto out;
79635 cmlen = CMSG_SPACE(len);
79636 if (msg->msg_controllen < cmlen)
79637@@ -229,7 +229,7 @@ out:
79638 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79639 {
79640 struct cmsghdr __user *cm
79641- = (__force struct cmsghdr __user*)msg->msg_control;
79642+ = (struct cmsghdr __force_user *)msg->msg_control;
79643
79644 int fdmax = 0;
79645 int fdnum = scm->fp->count;
79646@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79647 if (fdnum < fdmax)
79648 fdmax = fdnum;
79649
79650- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
79651+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
79652 i++, cmfptr++)
79653 {
79654 int new_fd;
79655diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
79656index 45329d7..626aaa6 100644
79657--- a/net/core/secure_seq.c
79658+++ b/net/core/secure_seq.c
79659@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
79660 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
79661
79662 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
79663- __be16 dport)
79664+ __be16 dport)
79665 {
79666 u32 secret[MD5_MESSAGE_BYTES / 4];
79667 u32 hash[MD5_DIGEST_WORDS];
79668@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
79669 secret[i] = net_secret[i];
79670
79671 md5_transform(hash, secret);
79672-
79673 return hash[0];
79674 }
79675 #endif
79676diff --git a/net/core/skbuff.c b/net/core/skbuff.c
79677index a807f8c..65f906f 100644
79678--- a/net/core/skbuff.c
79679+++ b/net/core/skbuff.c
79680@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
79681 struct sk_buff *frag_iter;
79682 struct sock *sk = skb->sk;
79683
79684+ pax_track_stack();
79685+
79686 /*
79687 * __skb_splice_bits() only fails if the output has no room left,
79688 * so no point in going over the frag_list for the error case.
79689diff --git a/net/core/sock.c b/net/core/sock.c
79690index 6605e75..3acebda 100644
79691--- a/net/core/sock.c
79692+++ b/net/core/sock.c
79693@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79694 break;
79695
79696 case SO_PEERCRED:
79697+ {
79698+ struct ucred peercred;
79699 if (len > sizeof(sk->sk_peercred))
79700 len = sizeof(sk->sk_peercred);
79701- if (copy_to_user(optval, &sk->sk_peercred, len))
79702+ peercred = sk->sk_peercred;
79703+ if (copy_to_user(optval, &peercred, len))
79704 return -EFAULT;
79705 goto lenout;
79706+ }
79707
79708 case SO_PEERNAME:
79709 {
79710@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
79711 */
79712 smp_wmb();
79713 atomic_set(&sk->sk_refcnt, 1);
79714- atomic_set(&sk->sk_drops, 0);
79715+ atomic_set_unchecked(&sk->sk_drops, 0);
79716 }
79717 EXPORT_SYMBOL(sock_init_data);
79718
79719diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
79720index 2036568..c55883d 100644
79721--- a/net/decnet/sysctl_net_decnet.c
79722+++ b/net/decnet/sysctl_net_decnet.c
79723@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
79724
79725 if (len > *lenp) len = *lenp;
79726
79727- if (copy_to_user(buffer, addr, len))
79728+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
79729 return -EFAULT;
79730
79731 *lenp = len;
79732@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
79733
79734 if (len > *lenp) len = *lenp;
79735
79736- if (copy_to_user(buffer, devname, len))
79737+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
79738 return -EFAULT;
79739
79740 *lenp = len;
79741diff --git a/net/econet/Kconfig b/net/econet/Kconfig
79742index 39a2d29..f39c0fe 100644
79743--- a/net/econet/Kconfig
79744+++ b/net/econet/Kconfig
79745@@ -4,7 +4,7 @@
79746
79747 config ECONET
79748 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
79749- depends on EXPERIMENTAL && INET
79750+ depends on EXPERIMENTAL && INET && BROKEN
79751 ---help---
79752 Econet is a fairly old and slow networking protocol mainly used by
79753 Acorn computers to access file and print servers. It uses native
79754diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
79755index a413b1b..380849c 100644
79756--- a/net/ieee802154/dgram.c
79757+++ b/net/ieee802154/dgram.c
79758@@ -318,7 +318,7 @@ out:
79759 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
79760 {
79761 if (sock_queue_rcv_skb(sk, skb) < 0) {
79762- atomic_inc(&sk->sk_drops);
79763+ atomic_inc_unchecked(&sk->sk_drops);
79764 kfree_skb(skb);
79765 return NET_RX_DROP;
79766 }
79767diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
79768index 30e74ee..bfc6ee0 100644
79769--- a/net/ieee802154/raw.c
79770+++ b/net/ieee802154/raw.c
79771@@ -206,7 +206,7 @@ out:
79772 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
79773 {
79774 if (sock_queue_rcv_skb(sk, skb) < 0) {
79775- atomic_inc(&sk->sk_drops);
79776+ atomic_inc_unchecked(&sk->sk_drops);
79777 kfree_skb(skb);
79778 return NET_RX_DROP;
79779 }
79780diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
79781index dba56d2..acee5d6 100644
79782--- a/net/ipv4/inet_diag.c
79783+++ b/net/ipv4/inet_diag.c
79784@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
79785 r->idiag_retrans = 0;
79786
79787 r->id.idiag_if = sk->sk_bound_dev_if;
79788+#ifdef CONFIG_GRKERNSEC_HIDESYM
79789+ r->id.idiag_cookie[0] = 0;
79790+ r->id.idiag_cookie[1] = 0;
79791+#else
79792 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
79793 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
79794+#endif
79795
79796 r->id.idiag_sport = inet->sport;
79797 r->id.idiag_dport = inet->dport;
79798@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
79799 r->idiag_family = tw->tw_family;
79800 r->idiag_retrans = 0;
79801 r->id.idiag_if = tw->tw_bound_dev_if;
79802+
79803+#ifdef CONFIG_GRKERNSEC_HIDESYM
79804+ r->id.idiag_cookie[0] = 0;
79805+ r->id.idiag_cookie[1] = 0;
79806+#else
79807 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
79808 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
79809+#endif
79810+
79811 r->id.idiag_sport = tw->tw_sport;
79812 r->id.idiag_dport = tw->tw_dport;
79813 r->id.idiag_src[0] = tw->tw_rcv_saddr;
79814@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
79815 if (sk == NULL)
79816 goto unlock;
79817
79818+#ifndef CONFIG_GRKERNSEC_HIDESYM
79819 err = -ESTALE;
79820 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
79821 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
79822 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
79823 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
79824 goto out;
79825+#endif
79826
79827 err = -ENOMEM;
79828 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
79829@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
79830 r->idiag_retrans = req->retrans;
79831
79832 r->id.idiag_if = sk->sk_bound_dev_if;
79833+
79834+#ifdef CONFIG_GRKERNSEC_HIDESYM
79835+ r->id.idiag_cookie[0] = 0;
79836+ r->id.idiag_cookie[1] = 0;
79837+#else
79838 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
79839 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
79840+#endif
79841
79842 tmo = req->expires - jiffies;
79843 if (tmo < 0)
79844diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
79845index d717267..56de7e7 100644
79846--- a/net/ipv4/inet_hashtables.c
79847+++ b/net/ipv4/inet_hashtables.c
79848@@ -18,12 +18,15 @@
79849 #include <linux/sched.h>
79850 #include <linux/slab.h>
79851 #include <linux/wait.h>
79852+#include <linux/security.h>
79853
79854 #include <net/inet_connection_sock.h>
79855 #include <net/inet_hashtables.h>
79856 #include <net/secure_seq.h>
79857 #include <net/ip.h>
79858
79859+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
79860+
79861 /*
79862 * Allocate and initialize a new local port bind bucket.
79863 * The bindhash mutex for snum's hash chain must be held here.
79864@@ -491,6 +494,8 @@ ok:
79865 }
79866 spin_unlock(&head->lock);
79867
79868+ gr_update_task_in_ip_table(current, inet_sk(sk));
79869+
79870 if (tw) {
79871 inet_twsk_deschedule(tw, death_row);
79872 inet_twsk_put(tw);
79873diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
79874index 13b229f..6956484 100644
79875--- a/net/ipv4/inetpeer.c
79876+++ b/net/ipv4/inetpeer.c
79877@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
79878 struct inet_peer *p, *n;
79879 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
79880
79881+ pax_track_stack();
79882+
79883 /* Look up for the address quickly. */
79884 read_lock_bh(&peer_pool_lock);
79885 p = lookup(daddr, NULL);
79886@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
79887 return NULL;
79888 n->v4daddr = daddr;
79889 atomic_set(&n->refcnt, 1);
79890- atomic_set(&n->rid, 0);
79891+ atomic_set_unchecked(&n->rid, 0);
79892 n->ip_id_count = secure_ip_id(daddr);
79893 n->tcp_ts_stamp = 0;
79894
79895diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
79896index d3fe10b..feeafc9 100644
79897--- a/net/ipv4/ip_fragment.c
79898+++ b/net/ipv4/ip_fragment.c
79899@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
79900 return 0;
79901
79902 start = qp->rid;
79903- end = atomic_inc_return(&peer->rid);
79904+ end = atomic_inc_return_unchecked(&peer->rid);
79905 qp->rid = end;
79906
79907 rc = qp->q.fragments && (end - start) > max;
79908diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
79909index e982b5c..f079d75 100644
79910--- a/net/ipv4/ip_sockglue.c
79911+++ b/net/ipv4/ip_sockglue.c
79912@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79913 int val;
79914 int len;
79915
79916+ pax_track_stack();
79917+
79918 if (level != SOL_IP)
79919 return -EOPNOTSUPP;
79920
79921@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79922 if (sk->sk_type != SOCK_STREAM)
79923 return -ENOPROTOOPT;
79924
79925- msg.msg_control = optval;
79926+ msg.msg_control = (void __force_kernel *)optval;
79927 msg.msg_controllen = len;
79928 msg.msg_flags = 0;
79929
79930diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
79931index f8d04c2..c1188f2 100644
79932--- a/net/ipv4/ipconfig.c
79933+++ b/net/ipv4/ipconfig.c
79934@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
79935
79936 mm_segment_t oldfs = get_fs();
79937 set_fs(get_ds());
79938- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79939+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79940 set_fs(oldfs);
79941 return res;
79942 }
79943@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
79944
79945 mm_segment_t oldfs = get_fs();
79946 set_fs(get_ds());
79947- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79948+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79949 set_fs(oldfs);
79950 return res;
79951 }
79952@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
79953
79954 mm_segment_t oldfs = get_fs();
79955 set_fs(get_ds());
79956- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
79957+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
79958 set_fs(oldfs);
79959 return res;
79960 }
79961diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
79962index c8b0cc3..4da5ae2 100644
79963--- a/net/ipv4/netfilter/arp_tables.c
79964+++ b/net/ipv4/netfilter/arp_tables.c
79965@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
79966 private = &tmp;
79967 }
79968 #endif
79969+ memset(&info, 0, sizeof(info));
79970 info.valid_hooks = t->valid_hooks;
79971 memcpy(info.hook_entry, private->hook_entry,
79972 sizeof(info.hook_entry));
79973diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
79974index c156db2..e772975 100644
79975--- a/net/ipv4/netfilter/ip_queue.c
79976+++ b/net/ipv4/netfilter/ip_queue.c
79977@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
79978
79979 if (v->data_len < sizeof(*user_iph))
79980 return 0;
79981+ if (v->data_len > 65535)
79982+ return -EMSGSIZE;
79983+
79984 diff = v->data_len - e->skb->len;
79985 if (diff < 0) {
79986 if (pskb_trim(e->skb, v->data_len))
79987@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
79988 static inline void
79989 __ipq_rcv_skb(struct sk_buff *skb)
79990 {
79991- int status, type, pid, flags, nlmsglen, skblen;
79992+ int status, type, pid, flags;
79993+ unsigned int nlmsglen, skblen;
79994 struct nlmsghdr *nlh;
79995
79996 skblen = skb->len;
79997diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
79998index 0606db1..02e7e4c 100644
79999--- a/net/ipv4/netfilter/ip_tables.c
80000+++ b/net/ipv4/netfilter/ip_tables.c
80001@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80002 private = &tmp;
80003 }
80004 #endif
80005+ memset(&info, 0, sizeof(info));
80006 info.valid_hooks = t->valid_hooks;
80007 memcpy(info.hook_entry, private->hook_entry,
80008 sizeof(info.hook_entry));
80009diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
80010index d9521f6..3c3eb25 100644
80011--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
80012+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
80013@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
80014
80015 *len = 0;
80016
80017- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
80018+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
80019 if (*octets == NULL) {
80020 if (net_ratelimit())
80021 printk("OOM in bsalg (%d)\n", __LINE__);
80022diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
80023index ab996f9..3da5f96 100644
80024--- a/net/ipv4/raw.c
80025+++ b/net/ipv4/raw.c
80026@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80027 /* Charge it to the socket. */
80028
80029 if (sock_queue_rcv_skb(sk, skb) < 0) {
80030- atomic_inc(&sk->sk_drops);
80031+ atomic_inc_unchecked(&sk->sk_drops);
80032 kfree_skb(skb);
80033 return NET_RX_DROP;
80034 }
80035@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80036 int raw_rcv(struct sock *sk, struct sk_buff *skb)
80037 {
80038 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
80039- atomic_inc(&sk->sk_drops);
80040+ atomic_inc_unchecked(&sk->sk_drops);
80041 kfree_skb(skb);
80042 return NET_RX_DROP;
80043 }
80044@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
80045
80046 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
80047 {
80048+ struct icmp_filter filter;
80049+
80050+ if (optlen < 0)
80051+ return -EINVAL;
80052 if (optlen > sizeof(struct icmp_filter))
80053 optlen = sizeof(struct icmp_filter);
80054- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
80055+ if (copy_from_user(&filter, optval, optlen))
80056 return -EFAULT;
80057+ raw_sk(sk)->filter = filter;
80058+
80059 return 0;
80060 }
80061
80062 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
80063 {
80064 int len, ret = -EFAULT;
80065+ struct icmp_filter filter;
80066
80067 if (get_user(len, optlen))
80068 goto out;
80069@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
80070 if (len > sizeof(struct icmp_filter))
80071 len = sizeof(struct icmp_filter);
80072 ret = -EFAULT;
80073- if (put_user(len, optlen) ||
80074- copy_to_user(optval, &raw_sk(sk)->filter, len))
80075+ filter = raw_sk(sk)->filter;
80076+ if (put_user(len, optlen) || len > sizeof filter ||
80077+ copy_to_user(optval, &filter, len))
80078 goto out;
80079 ret = 0;
80080 out: return ret;
80081@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80082 sk_wmem_alloc_get(sp),
80083 sk_rmem_alloc_get(sp),
80084 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80085- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80086+ atomic_read(&sp->sk_refcnt),
80087+#ifdef CONFIG_GRKERNSEC_HIDESYM
80088+ NULL,
80089+#else
80090+ sp,
80091+#endif
80092+ atomic_read_unchecked(&sp->sk_drops));
80093 }
80094
80095 static int raw_seq_show(struct seq_file *seq, void *v)
80096diff --git a/net/ipv4/route.c b/net/ipv4/route.c
80097index 58f141b..b759702 100644
80098--- a/net/ipv4/route.c
80099+++ b/net/ipv4/route.c
80100@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
80101
80102 static inline int rt_genid(struct net *net)
80103 {
80104- return atomic_read(&net->ipv4.rt_genid);
80105+ return atomic_read_unchecked(&net->ipv4.rt_genid);
80106 }
80107
80108 #ifdef CONFIG_PROC_FS
80109@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
80110 unsigned char shuffle;
80111
80112 get_random_bytes(&shuffle, sizeof(shuffle));
80113- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
80114+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
80115 }
80116
80117 /*
80118@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
80119
80120 static __net_init int rt_secret_timer_init(struct net *net)
80121 {
80122- atomic_set(&net->ipv4.rt_genid,
80123+ atomic_set_unchecked(&net->ipv4.rt_genid,
80124 (int) ((num_physpages ^ (num_physpages>>8)) ^
80125 (jiffies ^ (jiffies >> 7))));
80126
80127diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
80128index f095659..adc892a 100644
80129--- a/net/ipv4/tcp.c
80130+++ b/net/ipv4/tcp.c
80131@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
80132 int val;
80133 int err = 0;
80134
80135+ pax_track_stack();
80136+
80137 /* This is a string value all the others are int's */
80138 if (optname == TCP_CONGESTION) {
80139 char name[TCP_CA_NAME_MAX];
80140@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
80141 struct tcp_sock *tp = tcp_sk(sk);
80142 int val, len;
80143
80144+ pax_track_stack();
80145+
80146 if (get_user(len, optlen))
80147 return -EFAULT;
80148
80149diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
80150index 6fc7961..33bad4a 100644
80151--- a/net/ipv4/tcp_ipv4.c
80152+++ b/net/ipv4/tcp_ipv4.c
80153@@ -85,6 +85,9 @@
80154 int sysctl_tcp_tw_reuse __read_mostly;
80155 int sysctl_tcp_low_latency __read_mostly;
80156
80157+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80158+extern int grsec_enable_blackhole;
80159+#endif
80160
80161 #ifdef CONFIG_TCP_MD5SIG
80162 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
80163@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
80164 return 0;
80165
80166 reset:
80167+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80168+ if (!grsec_enable_blackhole)
80169+#endif
80170 tcp_v4_send_reset(rsk, skb);
80171 discard:
80172 kfree_skb(skb);
80173@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
80174 TCP_SKB_CB(skb)->sacked = 0;
80175
80176 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80177- if (!sk)
80178+ if (!sk) {
80179+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80180+ ret = 1;
80181+#endif
80182 goto no_tcp_socket;
80183+ }
80184
80185 process:
80186- if (sk->sk_state == TCP_TIME_WAIT)
80187+ if (sk->sk_state == TCP_TIME_WAIT) {
80188+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80189+ ret = 2;
80190+#endif
80191 goto do_time_wait;
80192+ }
80193
80194 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
80195 goto discard_and_relse;
80196@@ -1651,6 +1665,10 @@ no_tcp_socket:
80197 bad_packet:
80198 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80199 } else {
80200+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80201+ if (!grsec_enable_blackhole || (ret == 1 &&
80202+ (skb->dev->flags & IFF_LOOPBACK)))
80203+#endif
80204 tcp_v4_send_reset(NULL, skb);
80205 }
80206
80207@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
80208 0, /* non standard timer */
80209 0, /* open_requests have no inode */
80210 atomic_read(&sk->sk_refcnt),
80211+#ifdef CONFIG_GRKERNSEC_HIDESYM
80212+ NULL,
80213+#else
80214 req,
80215+#endif
80216 len);
80217 }
80218
80219@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
80220 sock_i_uid(sk),
80221 icsk->icsk_probes_out,
80222 sock_i_ino(sk),
80223- atomic_read(&sk->sk_refcnt), sk,
80224+ atomic_read(&sk->sk_refcnt),
80225+#ifdef CONFIG_GRKERNSEC_HIDESYM
80226+ NULL,
80227+#else
80228+ sk,
80229+#endif
80230 jiffies_to_clock_t(icsk->icsk_rto),
80231 jiffies_to_clock_t(icsk->icsk_ack.ato),
80232 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
80233@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
80234 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
80235 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
80236 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
80237- atomic_read(&tw->tw_refcnt), tw, len);
80238+ atomic_read(&tw->tw_refcnt),
80239+#ifdef CONFIG_GRKERNSEC_HIDESYM
80240+ NULL,
80241+#else
80242+ tw,
80243+#endif
80244+ len);
80245 }
80246
80247 #define TMPSZ 150
80248diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
80249index 4c03598..e09a8e8 100644
80250--- a/net/ipv4/tcp_minisocks.c
80251+++ b/net/ipv4/tcp_minisocks.c
80252@@ -26,6 +26,10 @@
80253 #include <net/inet_common.h>
80254 #include <net/xfrm.h>
80255
80256+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80257+extern int grsec_enable_blackhole;
80258+#endif
80259+
80260 #ifdef CONFIG_SYSCTL
80261 #define SYNC_INIT 0 /* let the user enable it */
80262 #else
80263@@ -672,6 +676,10 @@ listen_overflow:
80264
80265 embryonic_reset:
80266 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
80267+
80268+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80269+ if (!grsec_enable_blackhole)
80270+#endif
80271 if (!(flg & TCP_FLAG_RST))
80272 req->rsk_ops->send_reset(sk, skb);
80273
80274diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
80275index af83bdf..ec91cb2 100644
80276--- a/net/ipv4/tcp_output.c
80277+++ b/net/ipv4/tcp_output.c
80278@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
80279 __u8 *md5_hash_location;
80280 int mss;
80281
80282+ pax_track_stack();
80283+
80284 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
80285 if (skb == NULL)
80286 return NULL;
80287diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
80288index 59f5b5e..193860f 100644
80289--- a/net/ipv4/tcp_probe.c
80290+++ b/net/ipv4/tcp_probe.c
80291@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
80292 if (cnt + width >= len)
80293 break;
80294
80295- if (copy_to_user(buf + cnt, tbuf, width))
80296+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
80297 return -EFAULT;
80298 cnt += width;
80299 }
80300diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
80301index 57d5501..a9ed13a 100644
80302--- a/net/ipv4/tcp_timer.c
80303+++ b/net/ipv4/tcp_timer.c
80304@@ -21,6 +21,10 @@
80305 #include <linux/module.h>
80306 #include <net/tcp.h>
80307
80308+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80309+extern int grsec_lastack_retries;
80310+#endif
80311+
80312 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
80313 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
80314 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
80315@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
80316 }
80317 }
80318
80319+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80320+ if ((sk->sk_state == TCP_LAST_ACK) &&
80321+ (grsec_lastack_retries > 0) &&
80322+ (grsec_lastack_retries < retry_until))
80323+ retry_until = grsec_lastack_retries;
80324+#endif
80325+
80326 if (retransmits_timed_out(sk, retry_until)) {
80327 /* Has it gone just too far? */
80328 tcp_write_err(sk);
80329diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
80330index 0ac8833..58d8c43 100644
80331--- a/net/ipv4/udp.c
80332+++ b/net/ipv4/udp.c
80333@@ -86,6 +86,7 @@
80334 #include <linux/types.h>
80335 #include <linux/fcntl.h>
80336 #include <linux/module.h>
80337+#include <linux/security.h>
80338 #include <linux/socket.h>
80339 #include <linux/sockios.h>
80340 #include <linux/igmp.h>
80341@@ -106,6 +107,10 @@
80342 #include <net/xfrm.h>
80343 #include "udp_impl.h"
80344
80345+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80346+extern int grsec_enable_blackhole;
80347+#endif
80348+
80349 struct udp_table udp_table;
80350 EXPORT_SYMBOL(udp_table);
80351
80352@@ -371,6 +376,9 @@ found:
80353 return s;
80354 }
80355
80356+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
80357+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
80358+
80359 /*
80360 * This routine is called by the ICMP module when it gets some
80361 * sort of error condition. If err < 0 then the socket should
80362@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
80363 dport = usin->sin_port;
80364 if (dport == 0)
80365 return -EINVAL;
80366+
80367+ err = gr_search_udp_sendmsg(sk, usin);
80368+ if (err)
80369+ return err;
80370 } else {
80371 if (sk->sk_state != TCP_ESTABLISHED)
80372 return -EDESTADDRREQ;
80373+
80374+ err = gr_search_udp_sendmsg(sk, NULL);
80375+ if (err)
80376+ return err;
80377+
80378 daddr = inet->daddr;
80379 dport = inet->dport;
80380 /* Open fast path for connected socket.
80381@@ -945,6 +962,10 @@ try_again:
80382 if (!skb)
80383 goto out;
80384
80385+ err = gr_search_udp_recvmsg(sk, skb);
80386+ if (err)
80387+ goto out_free;
80388+
80389 ulen = skb->len - sizeof(struct udphdr);
80390 copied = len;
80391 if (copied > ulen)
80392@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80393 if (rc == -ENOMEM) {
80394 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80395 is_udplite);
80396- atomic_inc(&sk->sk_drops);
80397+ atomic_inc_unchecked(&sk->sk_drops);
80398 }
80399 goto drop;
80400 }
80401@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80402 goto csum_error;
80403
80404 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80405+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80406+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80407+#endif
80408 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
80409
80410 /*
80411@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
80412 sk_wmem_alloc_get(sp),
80413 sk_rmem_alloc_get(sp),
80414 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80415- atomic_read(&sp->sk_refcnt), sp,
80416- atomic_read(&sp->sk_drops), len);
80417+ atomic_read(&sp->sk_refcnt),
80418+#ifdef CONFIG_GRKERNSEC_HIDESYM
80419+ NULL,
80420+#else
80421+ sp,
80422+#endif
80423+ atomic_read_unchecked(&sp->sk_drops), len);
80424 }
80425
80426 int udp4_seq_show(struct seq_file *seq, void *v)
80427diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
80428index 8ac3d09..fc58c5f 100644
80429--- a/net/ipv6/addrconf.c
80430+++ b/net/ipv6/addrconf.c
80431@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
80432 p.iph.ihl = 5;
80433 p.iph.protocol = IPPROTO_IPV6;
80434 p.iph.ttl = 64;
80435- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
80436+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
80437
80438 if (ops->ndo_do_ioctl) {
80439 mm_segment_t oldfs = get_fs();
80440diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
80441index cc4797d..7cfdfcc 100644
80442--- a/net/ipv6/inet6_connection_sock.c
80443+++ b/net/ipv6/inet6_connection_sock.c
80444@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
80445 #ifdef CONFIG_XFRM
80446 {
80447 struct rt6_info *rt = (struct rt6_info *)dst;
80448- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
80449+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
80450 }
80451 #endif
80452 }
80453@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
80454 #ifdef CONFIG_XFRM
80455 if (dst) {
80456 struct rt6_info *rt = (struct rt6_info *)dst;
80457- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
80458+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
80459 sk->sk_dst_cache = NULL;
80460 dst_release(dst);
80461 dst = NULL;
80462diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
80463index 093e9b2..f72cddb 100644
80464--- a/net/ipv6/inet6_hashtables.c
80465+++ b/net/ipv6/inet6_hashtables.c
80466@@ -119,7 +119,7 @@ out:
80467 }
80468 EXPORT_SYMBOL(__inet6_lookup_established);
80469
80470-static int inline compute_score(struct sock *sk, struct net *net,
80471+static inline int compute_score(struct sock *sk, struct net *net,
80472 const unsigned short hnum,
80473 const struct in6_addr *daddr,
80474 const int dif)
80475diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
80476index 4f7aaf6..f7acf45 100644
80477--- a/net/ipv6/ipv6_sockglue.c
80478+++ b/net/ipv6/ipv6_sockglue.c
80479@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
80480 int val, valbool;
80481 int retv = -ENOPROTOOPT;
80482
80483+ pax_track_stack();
80484+
80485 if (optval == NULL)
80486 val=0;
80487 else {
80488@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
80489 int len;
80490 int val;
80491
80492+ pax_track_stack();
80493+
80494 if (ip6_mroute_opt(optname))
80495 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
80496
80497@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
80498 if (sk->sk_type != SOCK_STREAM)
80499 return -ENOPROTOOPT;
80500
80501- msg.msg_control = optval;
80502+ msg.msg_control = (void __force_kernel *)optval;
80503 msg.msg_controllen = len;
80504 msg.msg_flags = 0;
80505
80506diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
80507index 1cf3f0c..1d4376f 100644
80508--- a/net/ipv6/netfilter/ip6_queue.c
80509+++ b/net/ipv6/netfilter/ip6_queue.c
80510@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
80511
80512 if (v->data_len < sizeof(*user_iph))
80513 return 0;
80514+ if (v->data_len > 65535)
80515+ return -EMSGSIZE;
80516+
80517 diff = v->data_len - e->skb->len;
80518 if (diff < 0) {
80519 if (pskb_trim(e->skb, v->data_len))
80520@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
80521 static inline void
80522 __ipq_rcv_skb(struct sk_buff *skb)
80523 {
80524- int status, type, pid, flags, nlmsglen, skblen;
80525+ int status, type, pid, flags;
80526+ unsigned int nlmsglen, skblen;
80527 struct nlmsghdr *nlh;
80528
80529 skblen = skb->len;
80530diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
80531index 78b5a36..7f37433 100644
80532--- a/net/ipv6/netfilter/ip6_tables.c
80533+++ b/net/ipv6/netfilter/ip6_tables.c
80534@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80535 private = &tmp;
80536 }
80537 #endif
80538+ memset(&info, 0, sizeof(info));
80539 info.valid_hooks = t->valid_hooks;
80540 memcpy(info.hook_entry, private->hook_entry,
80541 sizeof(info.hook_entry));
80542diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
80543index 4f24570..b813b34 100644
80544--- a/net/ipv6/raw.c
80545+++ b/net/ipv6/raw.c
80546@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
80547 {
80548 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
80549 skb_checksum_complete(skb)) {
80550- atomic_inc(&sk->sk_drops);
80551+ atomic_inc_unchecked(&sk->sk_drops);
80552 kfree_skb(skb);
80553 return NET_RX_DROP;
80554 }
80555
80556 /* Charge it to the socket. */
80557 if (sock_queue_rcv_skb(sk,skb)<0) {
80558- atomic_inc(&sk->sk_drops);
80559+ atomic_inc_unchecked(&sk->sk_drops);
80560 kfree_skb(skb);
80561 return NET_RX_DROP;
80562 }
80563@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80564 struct raw6_sock *rp = raw6_sk(sk);
80565
80566 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
80567- atomic_inc(&sk->sk_drops);
80568+ atomic_inc_unchecked(&sk->sk_drops);
80569 kfree_skb(skb);
80570 return NET_RX_DROP;
80571 }
80572@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80573
80574 if (inet->hdrincl) {
80575 if (skb_checksum_complete(skb)) {
80576- atomic_inc(&sk->sk_drops);
80577+ atomic_inc_unchecked(&sk->sk_drops);
80578 kfree_skb(skb);
80579 return NET_RX_DROP;
80580 }
80581@@ -518,7 +518,7 @@ csum_copy_err:
80582 as some normal condition.
80583 */
80584 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
80585- atomic_inc(&sk->sk_drops);
80586+ atomic_inc_unchecked(&sk->sk_drops);
80587 goto out;
80588 }
80589
80590@@ -600,7 +600,7 @@ out:
80591 return err;
80592 }
80593
80594-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
80595+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
80596 struct flowi *fl, struct rt6_info *rt,
80597 unsigned int flags)
80598 {
80599@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
80600 u16 proto;
80601 int err;
80602
80603+ pax_track_stack();
80604+
80605 /* Rough check on arithmetic overflow,
80606 better check is made in ip6_append_data().
80607 */
80608@@ -916,12 +918,17 @@ do_confirm:
80609 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
80610 char __user *optval, int optlen)
80611 {
80612+ struct icmp6_filter filter;
80613+
80614 switch (optname) {
80615 case ICMPV6_FILTER:
80616+ if (optlen < 0)
80617+ return -EINVAL;
80618 if (optlen > sizeof(struct icmp6_filter))
80619 optlen = sizeof(struct icmp6_filter);
80620- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
80621+ if (copy_from_user(&filter, optval, optlen))
80622 return -EFAULT;
80623+ raw6_sk(sk)->filter = filter;
80624 return 0;
80625 default:
80626 return -ENOPROTOOPT;
80627@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80628 char __user *optval, int __user *optlen)
80629 {
80630 int len;
80631+ struct icmp6_filter filter;
80632
80633 switch (optname) {
80634 case ICMPV6_FILTER:
80635@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80636 len = sizeof(struct icmp6_filter);
80637 if (put_user(len, optlen))
80638 return -EFAULT;
80639- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
80640+ filter = raw6_sk(sk)->filter;
80641+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
80642 return -EFAULT;
80643 return 0;
80644 default:
80645@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80646 0, 0L, 0,
80647 sock_i_uid(sp), 0,
80648 sock_i_ino(sp),
80649- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80650+ atomic_read(&sp->sk_refcnt),
80651+#ifdef CONFIG_GRKERNSEC_HIDESYM
80652+ NULL,
80653+#else
80654+ sp,
80655+#endif
80656+ atomic_read_unchecked(&sp->sk_drops));
80657 }
80658
80659 static int raw6_seq_show(struct seq_file *seq, void *v)
80660diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
80661index faae6df..d4430c1 100644
80662--- a/net/ipv6/tcp_ipv6.c
80663+++ b/net/ipv6/tcp_ipv6.c
80664@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
80665 }
80666 #endif
80667
80668+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80669+extern int grsec_enable_blackhole;
80670+#endif
80671+
80672 static void tcp_v6_hash(struct sock *sk)
80673 {
80674 if (sk->sk_state != TCP_CLOSE) {
80675@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
80676 return 0;
80677
80678 reset:
80679+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80680+ if (!grsec_enable_blackhole)
80681+#endif
80682 tcp_v6_send_reset(sk, skb);
80683 discard:
80684 if (opt_skb)
80685@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
80686 TCP_SKB_CB(skb)->sacked = 0;
80687
80688 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80689- if (!sk)
80690+ if (!sk) {
80691+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80692+ ret = 1;
80693+#endif
80694 goto no_tcp_socket;
80695+ }
80696
80697 process:
80698- if (sk->sk_state == TCP_TIME_WAIT)
80699+ if (sk->sk_state == TCP_TIME_WAIT) {
80700+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80701+ ret = 2;
80702+#endif
80703 goto do_time_wait;
80704+ }
80705
80706 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
80707 goto discard_and_relse;
80708@@ -1701,6 +1716,10 @@ no_tcp_socket:
80709 bad_packet:
80710 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80711 } else {
80712+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80713+ if (!grsec_enable_blackhole || (ret == 1 &&
80714+ (skb->dev->flags & IFF_LOOPBACK)))
80715+#endif
80716 tcp_v6_send_reset(NULL, skb);
80717 }
80718
80719@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
80720 uid,
80721 0, /* non standard timer */
80722 0, /* open_requests have no inode */
80723- 0, req);
80724+ 0,
80725+#ifdef CONFIG_GRKERNSEC_HIDESYM
80726+ NULL
80727+#else
80728+ req
80729+#endif
80730+ );
80731 }
80732
80733 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
80734@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
80735 sock_i_uid(sp),
80736 icsk->icsk_probes_out,
80737 sock_i_ino(sp),
80738- atomic_read(&sp->sk_refcnt), sp,
80739+ atomic_read(&sp->sk_refcnt),
80740+#ifdef CONFIG_GRKERNSEC_HIDESYM
80741+ NULL,
80742+#else
80743+ sp,
80744+#endif
80745 jiffies_to_clock_t(icsk->icsk_rto),
80746 jiffies_to_clock_t(icsk->icsk_ack.ato),
80747 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
80748@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
80749 dest->s6_addr32[2], dest->s6_addr32[3], destp,
80750 tw->tw_substate, 0, 0,
80751 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
80752- atomic_read(&tw->tw_refcnt), tw);
80753+ atomic_read(&tw->tw_refcnt),
80754+#ifdef CONFIG_GRKERNSEC_HIDESYM
80755+ NULL
80756+#else
80757+ tw
80758+#endif
80759+ );
80760 }
80761
80762 static int tcp6_seq_show(struct seq_file *seq, void *v)
80763diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
80764index 9cc6289..052c521 100644
80765--- a/net/ipv6/udp.c
80766+++ b/net/ipv6/udp.c
80767@@ -49,6 +49,10 @@
80768 #include <linux/seq_file.h>
80769 #include "udp_impl.h"
80770
80771+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80772+extern int grsec_enable_blackhole;
80773+#endif
80774+
80775 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
80776 {
80777 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
80778@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
80779 if (rc == -ENOMEM) {
80780 UDP6_INC_STATS_BH(sock_net(sk),
80781 UDP_MIB_RCVBUFERRORS, is_udplite);
80782- atomic_inc(&sk->sk_drops);
80783+ atomic_inc_unchecked(&sk->sk_drops);
80784 }
80785 goto drop;
80786 }
80787@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80788 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
80789 proto == IPPROTO_UDPLITE);
80790
80791+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80792+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80793+#endif
80794 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
80795
80796 kfree_skb(skb);
80797@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
80798 0, 0L, 0,
80799 sock_i_uid(sp), 0,
80800 sock_i_ino(sp),
80801- atomic_read(&sp->sk_refcnt), sp,
80802- atomic_read(&sp->sk_drops));
80803+ atomic_read(&sp->sk_refcnt),
80804+#ifdef CONFIG_GRKERNSEC_HIDESYM
80805+ NULL,
80806+#else
80807+ sp,
80808+#endif
80809+ atomic_read_unchecked(&sp->sk_drops));
80810 }
80811
80812 int udp6_seq_show(struct seq_file *seq, void *v)
80813diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
80814index 811984d..11f59b7 100644
80815--- a/net/irda/ircomm/ircomm_tty.c
80816+++ b/net/irda/ircomm/ircomm_tty.c
80817@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80818 add_wait_queue(&self->open_wait, &wait);
80819
80820 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
80821- __FILE__,__LINE__, tty->driver->name, self->open_count );
80822+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
80823
80824 /* As far as I can see, we protect open_count - Jean II */
80825 spin_lock_irqsave(&self->spinlock, flags);
80826 if (!tty_hung_up_p(filp)) {
80827 extra_count = 1;
80828- self->open_count--;
80829+ local_dec(&self->open_count);
80830 }
80831 spin_unlock_irqrestore(&self->spinlock, flags);
80832- self->blocked_open++;
80833+ local_inc(&self->blocked_open);
80834
80835 while (1) {
80836 if (tty->termios->c_cflag & CBAUD) {
80837@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80838 }
80839
80840 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
80841- __FILE__,__LINE__, tty->driver->name, self->open_count );
80842+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
80843
80844 schedule();
80845 }
80846@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80847 if (extra_count) {
80848 /* ++ is not atomic, so this should be protected - Jean II */
80849 spin_lock_irqsave(&self->spinlock, flags);
80850- self->open_count++;
80851+ local_inc(&self->open_count);
80852 spin_unlock_irqrestore(&self->spinlock, flags);
80853 }
80854- self->blocked_open--;
80855+ local_dec(&self->blocked_open);
80856
80857 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
80858- __FILE__,__LINE__, tty->driver->name, self->open_count);
80859+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
80860
80861 if (!retval)
80862 self->flags |= ASYNC_NORMAL_ACTIVE;
80863@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
80864 }
80865 /* ++ is not atomic, so this should be protected - Jean II */
80866 spin_lock_irqsave(&self->spinlock, flags);
80867- self->open_count++;
80868+ local_inc(&self->open_count);
80869
80870 tty->driver_data = self;
80871 self->tty = tty;
80872 spin_unlock_irqrestore(&self->spinlock, flags);
80873
80874 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
80875- self->line, self->open_count);
80876+ self->line, local_read(&self->open_count));
80877
80878 /* Not really used by us, but lets do it anyway */
80879 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
80880@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
80881 return;
80882 }
80883
80884- if ((tty->count == 1) && (self->open_count != 1)) {
80885+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
80886 /*
80887 * Uh, oh. tty->count is 1, which means that the tty
80888 * structure will be freed. state->count should always
80889@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
80890 */
80891 IRDA_DEBUG(0, "%s(), bad serial port count; "
80892 "tty->count is 1, state->count is %d\n", __func__ ,
80893- self->open_count);
80894- self->open_count = 1;
80895+ local_read(&self->open_count));
80896+ local_set(&self->open_count, 1);
80897 }
80898
80899- if (--self->open_count < 0) {
80900+ if (local_dec_return(&self->open_count) < 0) {
80901 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
80902- __func__, self->line, self->open_count);
80903- self->open_count = 0;
80904+ __func__, self->line, local_read(&self->open_count));
80905+ local_set(&self->open_count, 0);
80906 }
80907- if (self->open_count) {
80908+ if (local_read(&self->open_count)) {
80909 spin_unlock_irqrestore(&self->spinlock, flags);
80910
80911 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
80912@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
80913 tty->closing = 0;
80914 self->tty = NULL;
80915
80916- if (self->blocked_open) {
80917+ if (local_read(&self->blocked_open)) {
80918 if (self->close_delay)
80919 schedule_timeout_interruptible(self->close_delay);
80920 wake_up_interruptible(&self->open_wait);
80921@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
80922 spin_lock_irqsave(&self->spinlock, flags);
80923 self->flags &= ~ASYNC_NORMAL_ACTIVE;
80924 self->tty = NULL;
80925- self->open_count = 0;
80926+ local_set(&self->open_count, 0);
80927 spin_unlock_irqrestore(&self->spinlock, flags);
80928
80929 wake_up_interruptible(&self->open_wait);
80930@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
80931 seq_putc(m, '\n');
80932
80933 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
80934- seq_printf(m, "Open count: %d\n", self->open_count);
80935+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
80936 seq_printf(m, "Max data size: %d\n", self->max_data_size);
80937 seq_printf(m, "Max header size: %d\n", self->max_header_size);
80938
80939diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
80940index bada1b9..f325943 100644
80941--- a/net/iucv/af_iucv.c
80942+++ b/net/iucv/af_iucv.c
80943@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
80944
80945 write_lock_bh(&iucv_sk_list.lock);
80946
80947- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
80948+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80949 while (__iucv_get_sock_by_name(name)) {
80950 sprintf(name, "%08x",
80951- atomic_inc_return(&iucv_sk_list.autobind_name));
80952+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80953 }
80954
80955 write_unlock_bh(&iucv_sk_list.lock);
80956diff --git a/net/key/af_key.c b/net/key/af_key.c
80957index 4e98193..439b449 100644
80958--- a/net/key/af_key.c
80959+++ b/net/key/af_key.c
80960@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
80961 struct xfrm_migrate m[XFRM_MAX_DEPTH];
80962 struct xfrm_kmaddress k;
80963
80964+ pax_track_stack();
80965+
80966 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
80967 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
80968 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
80969@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
80970 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
80971 else
80972 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
80973+#ifdef CONFIG_GRKERNSEC_HIDESYM
80974+ NULL,
80975+#else
80976 s,
80977+#endif
80978 atomic_read(&s->sk_refcnt),
80979 sk_rmem_alloc_get(s),
80980 sk_wmem_alloc_get(s),
80981diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
80982index bda96d1..c038b72 100644
80983--- a/net/lapb/lapb_iface.c
80984+++ b/net/lapb/lapb_iface.c
80985@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
80986 goto out;
80987
80988 lapb->dev = dev;
80989- lapb->callbacks = *callbacks;
80990+ lapb->callbacks = callbacks;
80991
80992 __lapb_insert_cb(lapb);
80993
80994@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
80995
80996 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
80997 {
80998- if (lapb->callbacks.connect_confirmation)
80999- lapb->callbacks.connect_confirmation(lapb->dev, reason);
81000+ if (lapb->callbacks->connect_confirmation)
81001+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
81002 }
81003
81004 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
81005 {
81006- if (lapb->callbacks.connect_indication)
81007- lapb->callbacks.connect_indication(lapb->dev, reason);
81008+ if (lapb->callbacks->connect_indication)
81009+ lapb->callbacks->connect_indication(lapb->dev, reason);
81010 }
81011
81012 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
81013 {
81014- if (lapb->callbacks.disconnect_confirmation)
81015- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
81016+ if (lapb->callbacks->disconnect_confirmation)
81017+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
81018 }
81019
81020 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
81021 {
81022- if (lapb->callbacks.disconnect_indication)
81023- lapb->callbacks.disconnect_indication(lapb->dev, reason);
81024+ if (lapb->callbacks->disconnect_indication)
81025+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
81026 }
81027
81028 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
81029 {
81030- if (lapb->callbacks.data_indication)
81031- return lapb->callbacks.data_indication(lapb->dev, skb);
81032+ if (lapb->callbacks->data_indication)
81033+ return lapb->callbacks->data_indication(lapb->dev, skb);
81034
81035 kfree_skb(skb);
81036 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
81037@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
81038 {
81039 int used = 0;
81040
81041- if (lapb->callbacks.data_transmit) {
81042- lapb->callbacks.data_transmit(lapb->dev, skb);
81043+ if (lapb->callbacks->data_transmit) {
81044+ lapb->callbacks->data_transmit(lapb->dev, skb);
81045 used = 1;
81046 }
81047
81048diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
81049index fe2d3f8..e57f683 100644
81050--- a/net/mac80211/cfg.c
81051+++ b/net/mac80211/cfg.c
81052@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
81053 return err;
81054 }
81055
81056-struct cfg80211_ops mac80211_config_ops = {
81057+const struct cfg80211_ops mac80211_config_ops = {
81058 .add_virtual_intf = ieee80211_add_iface,
81059 .del_virtual_intf = ieee80211_del_iface,
81060 .change_virtual_intf = ieee80211_change_iface,
81061diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
81062index 7d7879f..2d51f62 100644
81063--- a/net/mac80211/cfg.h
81064+++ b/net/mac80211/cfg.h
81065@@ -4,6 +4,6 @@
81066 #ifndef __CFG_H
81067 #define __CFG_H
81068
81069-extern struct cfg80211_ops mac80211_config_ops;
81070+extern const struct cfg80211_ops mac80211_config_ops;
81071
81072 #endif /* __CFG_H */
81073diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
81074index 99c7525..9cb4937 100644
81075--- a/net/mac80211/debugfs_key.c
81076+++ b/net/mac80211/debugfs_key.c
81077@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
81078 size_t count, loff_t *ppos)
81079 {
81080 struct ieee80211_key *key = file->private_data;
81081- int i, res, bufsize = 2 * key->conf.keylen + 2;
81082+ int i, bufsize = 2 * key->conf.keylen + 2;
81083 char *buf = kmalloc(bufsize, GFP_KERNEL);
81084 char *p = buf;
81085+ ssize_t res;
81086+
81087+ if (buf == NULL)
81088+ return -ENOMEM;
81089
81090 for (i = 0; i < key->conf.keylen; i++)
81091 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
81092diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
81093index 33a2e89..08650c8 100644
81094--- a/net/mac80211/debugfs_sta.c
81095+++ b/net/mac80211/debugfs_sta.c
81096@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
81097 int i;
81098 struct sta_info *sta = file->private_data;
81099
81100+ pax_track_stack();
81101+
81102 spin_lock_bh(&sta->lock);
81103 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
81104 sta->ampdu_mlme.dialog_token_allocator + 1);
81105diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
81106index ca62bfe..6657a03 100644
81107--- a/net/mac80211/ieee80211_i.h
81108+++ b/net/mac80211/ieee80211_i.h
81109@@ -25,6 +25,7 @@
81110 #include <linux/etherdevice.h>
81111 #include <net/cfg80211.h>
81112 #include <net/mac80211.h>
81113+#include <asm/local.h>
81114 #include "key.h"
81115 #include "sta_info.h"
81116
81117@@ -635,7 +636,7 @@ struct ieee80211_local {
81118 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
81119 spinlock_t queue_stop_reason_lock;
81120
81121- int open_count;
81122+ local_t open_count;
81123 int monitors, cooked_mntrs;
81124 /* number of interfaces with corresponding FIF_ flags */
81125 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
81126diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
81127index 079c500..eb3c6d4 100644
81128--- a/net/mac80211/iface.c
81129+++ b/net/mac80211/iface.c
81130@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
81131 break;
81132 }
81133
81134- if (local->open_count == 0) {
81135+ if (local_read(&local->open_count) == 0) {
81136 res = drv_start(local);
81137 if (res)
81138 goto err_del_bss;
81139@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
81140 * Validate the MAC address for this device.
81141 */
81142 if (!is_valid_ether_addr(dev->dev_addr)) {
81143- if (!local->open_count)
81144+ if (!local_read(&local->open_count))
81145 drv_stop(local);
81146 return -EADDRNOTAVAIL;
81147 }
81148@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
81149
81150 hw_reconf_flags |= __ieee80211_recalc_idle(local);
81151
81152- local->open_count++;
81153+ local_inc(&local->open_count);
81154 if (hw_reconf_flags) {
81155 ieee80211_hw_config(local, hw_reconf_flags);
81156 /*
81157@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
81158 err_del_interface:
81159 drv_remove_interface(local, &conf);
81160 err_stop:
81161- if (!local->open_count)
81162+ if (!local_read(&local->open_count))
81163 drv_stop(local);
81164 err_del_bss:
81165 sdata->bss = NULL;
81166@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
81167 WARN_ON(!list_empty(&sdata->u.ap.vlans));
81168 }
81169
81170- local->open_count--;
81171+ local_dec(&local->open_count);
81172
81173 switch (sdata->vif.type) {
81174 case NL80211_IFTYPE_AP_VLAN:
81175@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
81176
81177 ieee80211_recalc_ps(local, -1);
81178
81179- if (local->open_count == 0) {
81180+ if (local_read(&local->open_count) == 0) {
81181 ieee80211_clear_tx_pending(local);
81182 ieee80211_stop_device(local);
81183
81184diff --git a/net/mac80211/main.c b/net/mac80211/main.c
81185index 2dfe176..74e4388 100644
81186--- a/net/mac80211/main.c
81187+++ b/net/mac80211/main.c
81188@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
81189 local->hw.conf.power_level = power;
81190 }
81191
81192- if (changed && local->open_count) {
81193+ if (changed && local_read(&local->open_count)) {
81194 ret = drv_config(local, changed);
81195 /*
81196 * Goal:
81197diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
81198index e67eea7..fcc227e 100644
81199--- a/net/mac80211/mlme.c
81200+++ b/net/mac80211/mlme.c
81201@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
81202 bool have_higher_than_11mbit = false, newsta = false;
81203 u16 ap_ht_cap_flags;
81204
81205+ pax_track_stack();
81206+
81207 /*
81208 * AssocResp and ReassocResp have identical structure, so process both
81209 * of them in this function.
81210diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
81211index e535f1c..4d733d1 100644
81212--- a/net/mac80211/pm.c
81213+++ b/net/mac80211/pm.c
81214@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
81215 }
81216
81217 /* stop hardware - this must stop RX */
81218- if (local->open_count)
81219+ if (local_read(&local->open_count))
81220 ieee80211_stop_device(local);
81221
81222 local->suspended = true;
81223diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
81224index b33efc4..0a2efb6 100644
81225--- a/net/mac80211/rate.c
81226+++ b/net/mac80211/rate.c
81227@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
81228 struct rate_control_ref *ref, *old;
81229
81230 ASSERT_RTNL();
81231- if (local->open_count)
81232+ if (local_read(&local->open_count))
81233 return -EBUSY;
81234
81235 ref = rate_control_alloc(name, local);
81236diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
81237index b1d7904..57e4da7 100644
81238--- a/net/mac80211/tx.c
81239+++ b/net/mac80211/tx.c
81240@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
81241 return cpu_to_le16(dur);
81242 }
81243
81244-static int inline is_ieee80211_device(struct ieee80211_local *local,
81245+static inline int is_ieee80211_device(struct ieee80211_local *local,
81246 struct net_device *dev)
81247 {
81248 return local == wdev_priv(dev->ieee80211_ptr);
81249diff --git a/net/mac80211/util.c b/net/mac80211/util.c
81250index 31b1085..48fb26d 100644
81251--- a/net/mac80211/util.c
81252+++ b/net/mac80211/util.c
81253@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
81254 local->resuming = true;
81255
81256 /* restart hardware */
81257- if (local->open_count) {
81258+ if (local_read(&local->open_count)) {
81259 /*
81260 * Upon resume hardware can sometimes be goofy due to
81261 * various platform / driver / bus issues, so restarting
81262diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
81263index 634d14a..b35a608 100644
81264--- a/net/netfilter/Kconfig
81265+++ b/net/netfilter/Kconfig
81266@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
81267
81268 To compile it as a module, choose M here. If unsure, say N.
81269
81270+config NETFILTER_XT_MATCH_GRADM
81271+ tristate '"gradm" match support'
81272+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
81273+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
81274+ ---help---
81275+ The gradm match allows to match on grsecurity RBAC being enabled.
81276+ It is useful when iptables rules are applied early on bootup to
81277+ prevent connections to the machine (except from a trusted host)
81278+ while the RBAC system is disabled.
81279+
81280 config NETFILTER_XT_MATCH_HASHLIMIT
81281 tristate '"hashlimit" match support'
81282 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
81283diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
81284index 49f62ee..a17b2c6 100644
81285--- a/net/netfilter/Makefile
81286+++ b/net/netfilter/Makefile
81287@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
81288 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
81289 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
81290 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
81291+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
81292 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
81293 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
81294 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
81295diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
81296index 3c7e427..724043c 100644
81297--- a/net/netfilter/ipvs/ip_vs_app.c
81298+++ b/net/netfilter/ipvs/ip_vs_app.c
81299@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
81300 .open = ip_vs_app_open,
81301 .read = seq_read,
81302 .llseek = seq_lseek,
81303- .release = seq_release,
81304+ .release = seq_release_net,
81305 };
81306 #endif
81307
81308diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
81309index 95682e5..457dbac 100644
81310--- a/net/netfilter/ipvs/ip_vs_conn.c
81311+++ b/net/netfilter/ipvs/ip_vs_conn.c
81312@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
81313 /* if the connection is not template and is created
81314 * by sync, preserve the activity flag.
81315 */
81316- cp->flags |= atomic_read(&dest->conn_flags) &
81317+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
81318 (~IP_VS_CONN_F_INACTIVE);
81319 else
81320- cp->flags |= atomic_read(&dest->conn_flags);
81321+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
81322 cp->dest = dest;
81323
81324 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
81325@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
81326 atomic_set(&cp->refcnt, 1);
81327
81328 atomic_set(&cp->n_control, 0);
81329- atomic_set(&cp->in_pkts, 0);
81330+ atomic_set_unchecked(&cp->in_pkts, 0);
81331
81332 atomic_inc(&ip_vs_conn_count);
81333 if (flags & IP_VS_CONN_F_NO_CPORT)
81334@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
81335 .open = ip_vs_conn_open,
81336 .read = seq_read,
81337 .llseek = seq_lseek,
81338- .release = seq_release,
81339+ .release = seq_release_net,
81340 };
81341
81342 static const char *ip_vs_origin_name(unsigned flags)
81343@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
81344 .open = ip_vs_conn_sync_open,
81345 .read = seq_read,
81346 .llseek = seq_lseek,
81347- .release = seq_release,
81348+ .release = seq_release_net,
81349 };
81350
81351 #endif
81352@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
81353
81354 /* Don't drop the entry if its number of incoming packets is not
81355 located in [0, 8] */
81356- i = atomic_read(&cp->in_pkts);
81357+ i = atomic_read_unchecked(&cp->in_pkts);
81358 if (i > 8 || i < 0) return 0;
81359
81360 if (!todrop_rate[i]) return 0;
81361diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
81362index b95699f..5fee919 100644
81363--- a/net/netfilter/ipvs/ip_vs_core.c
81364+++ b/net/netfilter/ipvs/ip_vs_core.c
81365@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
81366 ret = cp->packet_xmit(skb, cp, pp);
81367 /* do not touch skb anymore */
81368
81369- atomic_inc(&cp->in_pkts);
81370+ atomic_inc_unchecked(&cp->in_pkts);
81371 ip_vs_conn_put(cp);
81372 return ret;
81373 }
81374@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
81375 * Sync connection if it is about to close to
81376 * encorage the standby servers to update the connections timeout
81377 */
81378- pkts = atomic_add_return(1, &cp->in_pkts);
81379+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
81380 if (af == AF_INET &&
81381 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
81382 (((cp->protocol != IPPROTO_TCP ||
81383diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
81384index 02b2610..2d89424 100644
81385--- a/net/netfilter/ipvs/ip_vs_ctl.c
81386+++ b/net/netfilter/ipvs/ip_vs_ctl.c
81387@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
81388 ip_vs_rs_hash(dest);
81389 write_unlock_bh(&__ip_vs_rs_lock);
81390 }
81391- atomic_set(&dest->conn_flags, conn_flags);
81392+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
81393
81394 /* bind the service */
81395 if (!dest->svc) {
81396@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
81397 " %-7s %-6d %-10d %-10d\n",
81398 &dest->addr.in6,
81399 ntohs(dest->port),
81400- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
81401+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
81402 atomic_read(&dest->weight),
81403 atomic_read(&dest->activeconns),
81404 atomic_read(&dest->inactconns));
81405@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
81406 "%-7s %-6d %-10d %-10d\n",
81407 ntohl(dest->addr.ip),
81408 ntohs(dest->port),
81409- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
81410+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
81411 atomic_read(&dest->weight),
81412 atomic_read(&dest->activeconns),
81413 atomic_read(&dest->inactconns));
81414@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
81415 .open = ip_vs_info_open,
81416 .read = seq_read,
81417 .llseek = seq_lseek,
81418- .release = seq_release_private,
81419+ .release = seq_release_net,
81420 };
81421
81422 #endif
81423@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
81424 .open = ip_vs_stats_seq_open,
81425 .read = seq_read,
81426 .llseek = seq_lseek,
81427- .release = single_release,
81428+ .release = single_release_net,
81429 };
81430
81431 #endif
81432@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
81433
81434 entry.addr = dest->addr.ip;
81435 entry.port = dest->port;
81436- entry.conn_flags = atomic_read(&dest->conn_flags);
81437+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
81438 entry.weight = atomic_read(&dest->weight);
81439 entry.u_threshold = dest->u_threshold;
81440 entry.l_threshold = dest->l_threshold;
81441@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
81442 unsigned char arg[128];
81443 int ret = 0;
81444
81445+ pax_track_stack();
81446+
81447 if (!capable(CAP_NET_ADMIN))
81448 return -EPERM;
81449
81450@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
81451 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
81452
81453 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
81454- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
81455+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
81456 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
81457 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
81458 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
81459diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
81460index e177f0d..55e8581 100644
81461--- a/net/netfilter/ipvs/ip_vs_sync.c
81462+++ b/net/netfilter/ipvs/ip_vs_sync.c
81463@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
81464
81465 if (opt)
81466 memcpy(&cp->in_seq, opt, sizeof(*opt));
81467- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
81468+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
81469 cp->state = state;
81470 cp->old_state = cp->state;
81471 /*
81472diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
81473index 30b3189..e2e4b55 100644
81474--- a/net/netfilter/ipvs/ip_vs_xmit.c
81475+++ b/net/netfilter/ipvs/ip_vs_xmit.c
81476@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
81477 else
81478 rc = NF_ACCEPT;
81479 /* do not touch skb anymore */
81480- atomic_inc(&cp->in_pkts);
81481+ atomic_inc_unchecked(&cp->in_pkts);
81482 goto out;
81483 }
81484
81485@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
81486 else
81487 rc = NF_ACCEPT;
81488 /* do not touch skb anymore */
81489- atomic_inc(&cp->in_pkts);
81490+ atomic_inc_unchecked(&cp->in_pkts);
81491 goto out;
81492 }
81493
81494diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
81495index d521718..d0fd7a1 100644
81496--- a/net/netfilter/nf_conntrack_netlink.c
81497+++ b/net/netfilter/nf_conntrack_netlink.c
81498@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
81499 static int
81500 ctnetlink_parse_tuple(const struct nlattr * const cda[],
81501 struct nf_conntrack_tuple *tuple,
81502- enum ctattr_tuple type, u_int8_t l3num)
81503+ enum ctattr_type type, u_int8_t l3num)
81504 {
81505 struct nlattr *tb[CTA_TUPLE_MAX+1];
81506 int err;
81507diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
81508index f900dc3..5e45346 100644
81509--- a/net/netfilter/nfnetlink_log.c
81510+++ b/net/netfilter/nfnetlink_log.c
81511@@ -68,7 +68,7 @@ struct nfulnl_instance {
81512 };
81513
81514 static DEFINE_RWLOCK(instances_lock);
81515-static atomic_t global_seq;
81516+static atomic_unchecked_t global_seq;
81517
81518 #define INSTANCE_BUCKETS 16
81519 static struct hlist_head instance_table[INSTANCE_BUCKETS];
81520@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
81521 /* global sequence number */
81522 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
81523 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
81524- htonl(atomic_inc_return(&global_seq)));
81525+ htonl(atomic_inc_return_unchecked(&global_seq)));
81526
81527 if (data_len) {
81528 struct nlattr *nla;
81529diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
81530new file mode 100644
81531index 0000000..b1bac76
81532--- /dev/null
81533+++ b/net/netfilter/xt_gradm.c
81534@@ -0,0 +1,51 @@
81535+/*
81536+ * gradm match for netfilter
81537